def load_augmentation_settings(handler_save_path: str):

    if not file_exists(handler_save_path):
        handler = AugHandler(
            [
                aug.Affine(
                    scale={"x": tuple([0.8, 1]), "y": tuple([0.8, 1])},
                    translate_percent={"x": tuple([0.1, 0.11]), "y": tuple([0.1, 0.11])},
                    rotate=[-180, 180],
                    order=[0, 0],
                    cval=[0, 0],
                    shear=[0, 0],
                    frequency=0.6).change_rotate_to_right_angle(),
                aug.GaussianBlur(sigma=(0, 1.5), frequency=0.5),
                # aug.Crop(percent=[0, 0.1], frequency=0.5),
                # aug.Flipud(p=0.5),
                aug.Sharpen(alpha=[0, 0.5], lightness=[0.8, 1], frequency=0.5),
                aug.Emboss(alpha=[0, 0.5], strength=[0.8, 1], frequency=0.5),
                aug.AdditiveGaussianNoise(loc=0, scale=[0, 24.75], per_channel=1, frequency=0.8),
                aug.Add(value=[-20, 20], per_channel=True, frequency=0.5),
                aug.LinearContrast(alpha=[0.6, 1.4], per_channel=True, frequency=0.5),
                aug.Dropout(p=[0, 0.01], per_channel=True),
                aug.AverageBlur(k=[1, 2], frequency=0.5),
                aug.MotionBlur(k=[3, 5], angle=[0, 360], frequency=0.5),
                aug.Fliplr(p=0.5, frequency=0.7)
            ]
        )
        handler.save_to_path(save_path=handler_save_path, overwrite=True)
    else:
        handler = AugHandler.load_from_path(handler_save_path)

    return handler
Example #2
 def save_to_txt(self, save_path: str, overwrite: bool = False):
     if file_exists(save_path) and not overwrite:
         raise FileExistsError(f"""
             File already exists at {save_path}
             Hint: Use overwrite=True to save anyway.
             """)
     np.savetxt(fname=save_path, X=self.to_matrix())
    def save(self, img: np.ndarray, bbox_list: list, kpt_list_list: list,
             kpt_skeleton_list: list, kpt_label_list_list: list,
             seg_list: list, cat_name_list: list, file_name: str):
        result = img.copy()
        save_path = f"{self.visualization_dump_dir}/{file_name}"
        retry_count = 0
        while file_exists(save_path):
            rootname = get_rootname_from_filename(file_name)
            extension = get_extension_from_filename(file_name)
            save_path = f"{self.visualization_dump_dir}/{rootname}_{retry_count}.{extension}"
            if retry_count == 9:
                logger.error(f"Can't resolve save_path.")
                raise Exception
            retry_count += 1

        for bbox, kpt_list, kpt_skeleton, kpt_label_list, seg, cat_name in \
            zip(bbox_list, kpt_list_list, kpt_skeleton_list, kpt_label_list_list, seg_list, cat_name_list):
            result = self._draw(img=result,
                                bbox=bbox,
                                kpt_list=kpt_list,
                                kpt_skeleton=kpt_skeleton,
                                kpt_label_list=kpt_label_list,
                                seg=seg,
                                cat_name=cat_name)
        cv2.imwrite(filename=save_path, img=result)
        logger.info(f"Wrote {save_path}")
        self.viz_count += 1
Example #4
 def dump_save_dict(self, save_path: str, overwrite: bool = False):
     if file_exists(save_path) and not overwrite:
         raise Error(f'File already exists at save_path: {save_path}')
     json.dump(self.save_dict,
               open(save_path, 'w'),
               indent=2,
               ensure_ascii=False)
Example #5
 def save_to_path(self: T, save_path: str, overwrite: bool = False):
     if file_exists(save_path) and not overwrite:
         logger.error(f'File already exists at save_path: {save_path}')
         raise Exception
     json_dict = self.to_dict()
     json.dump(json_dict,
               open(save_path, 'w'),
               indent=2,
               ensure_ascii=False)
Example #6
 def save_to_path(self: T, save_path: str, overwrite: bool = False):
     if file_exists(save_path) and not overwrite:
         logger.error(f'File already exists at save_path: {save_path}')
         raise Exception
     elif file_exists(save_path) and overwrite:
         json_dict_list = json.load(open(save_path, 'r'))
         json_dict_list = [
             item for item in json_dict_list
             if item['class_name'] != self.class_name
         ]
         json_dict_list.append(self.to_dict())
     else:
         json_dict = self.to_dict()
         json_dict_list = [json_dict]
     json.dump(json_dict_list,
               open(save_path, 'w'),
               indent=2,
               ensure_ascii=False)
Example #7
 def save_to_path(self, save_path: str, overwrite: bool=False, img_path: str=None):
     if file_exists(save_path) and not overwrite:
         logger.error(f'File already exists at save_path: {save_path}')
         raise Exception
     if img_path is not None:
         import os
         self.img_path = os.path.relpath(path=img_path, start=get_dirpath_from_filepath(save_path))
     json_dict = self.to_dict()
     json.dump(json_dict, open(save_path, 'w'), indent=2, ensure_ascii=False)
Example #8
 def premove_check(self, img_src_path: str, img_dst_path: str,
                   ann_src_path: str, ann_dst_path: str):
     paths_not_found = []
     paths_already_exists = []
     if not file_exists(img_src_path):
         paths_not_found.append(img_src_path)
     if file_exists(img_dst_path):
         paths_already_exists.append(img_dst_path)
     if not file_exists(ann_src_path):
         paths_not_found.append(ann_src_path)
     if file_exists(ann_dst_path):
         paths_already_exists.append(ann_dst_path)
     if len(paths_not_found) > 0:
         for path_not_found in paths_not_found:
             logger.error(f"File not found: {path_not_found}")
         raise Exception
     if len(paths_already_exists) > 0:
         for path_already_exists in paths_already_exists:
             logger.error(f"File already exists: {path_already_exists}")
         raise Exception
Example #9
 def check_ann_exists_in_img(self, img_pathlist: list, ann_pathlist: list,
                             img_dir_path: str):
     paths_not_found = []
     for ann_path in ann_pathlist:
         rootname = get_rootname_from_path(ann_path)
         corresponding_img_path = f"{img_dir_path}/{rootname}.png"
         if not file_exists(corresponding_img_path):
             paths_not_found.append(corresponding_img_path)
     if len(paths_not_found) > 0:
         for path_not_found in paths_not_found:
             logger.error(f"File not found: {path_not_found}")
         raise Exception
Example #10
 def __init__(self, src_list: List[str], scale_factor_list: List[float]=None):
     for src in src_list:
         if isinstance(src, int):
             pass
         elif isinstance(src, str):
             assert file_exists(src)
     if scale_factor_list is not None:
         assert len(scale_factor_list) == len(src_list)
     else:
         scale_factor_list = [1.0]*len(src_list)
     self.src_list = src_list
     self.streamer_list = [Streamer(src=src, scale_factor=scale_factor) for src, scale_factor in zip(src_list, scale_factor_list)]
Example #11
def load_augmentation_settings(handler_save_path: str):

    if not file_exists(handler_save_path):
        handler = AugHandler(
            [
                aug.Crop(percent=[0.2, 0.5]),
                aug.Affine(scale = {"x": tuple([0.8, 1.2]), "y":tuple([0.8, 1.2])}, translate_percent= {"x": tuple([0, 0]), "y":tuple([0, 0])}, rotate= [0, 0], order= [0, 0], cval= [0, 0], shear= [0,0], fit_output=True)
            ]
        )
        handler.save_to_path(save_path=handler_save_path, overwrite=True)
    else:
        handler = AugHandler.load_from_path(handler_save_path)

    return handler
Example #12
 def save_to_path(self, save_path: str, overwrite: bool = False):
     if file_exists(save_path) and not overwrite:
         logger.error(f'File already exists at: {save_path}')
         logger.error(f'Use overwrite=True to overwrite.')
         raise Exception
     extension = get_extension_from_path(save_path)
     if extension == 'json':
         json.dump(self.to_dict(),
                   open(save_path, 'w'),
                   indent=2,
                   ensure_ascii=False)
     elif extension == 'yaml':
         yaml.dump(self.to_dict(), open(save_path, 'w'), allow_unicode=True)
     else:
         logger.error(f'Invalid file extension encountered: {extension}')
         logger.error(f"Valid file extensions: {['json', 'yaml']}")
         logger.error(f'Path specified: {save_path}')
         raise Exception
from imageaug import AugHandler, Augmenter as aug

save_img_path = "/home/pasonatech/detectron/detectron2/gbox/vis_image/"

dataset = COCO_Dataset.load_from_path(
    json_path='/home/pasonatech/combined_cocooutput/HSR-coco.json',
    img_dir='/home/pasonatech/combined_cocooutput'
    # json_path='/home/pasonatech/aug_real_combine/aug_sim_com_garbage/HSR-coco.json',
    # img_dir='/home/pasonatech/aug_real_combine/aug_sim_com_garbage'
)

resize_save_path = 'test_resize.json'
handler_save_path = 'test_handler.json'
if not file_exists(handler_save_path):
    handler = AugHandler(
        [
            aug.Crop(percent=[0.2, 0.5]),
            aug.Flipud(p=0.5),
            aug.Superpixels()
            # aug.Sharpen(alpha=[-1,0.1], lightness=[0,3])
        ]
    )
    handler.save_to_path(save_path=handler_save_path, overwrite=True)
    logger.info(f'Created new AugHandler save.')
else:
    handler = AugHandler.load_from_path(handler_save_path)
    logger.info(f'Loaded AugHandler from save.')
i=0
img_buffer = []
Example #14
 def _save_image(self, img: np.ndarray, save_path: str):
     if not file_exists(save_path):
         cv2.imwrite(filename=save_path, img=img)
     else:
         logger.error(f'File already exists: {save_path}')
         raise Exception
Example #15
    def to_coco(self,
                img_dir: str = None,
                mask_dir: str = None,
                coco_license: COCO_License = None,
                check_paths: bool = True,
                mask_lower_bgr: Tuple[int] = None,
                mask_upper_bgr: Tuple[int] = (255, 255, 255),
                show_pbar: bool = True) -> COCO_Dataset:
        dataset = COCO_Dataset.new(
            description='Dataset converted from Linemod to COCO format.')
        dataset.licenses.append(
            coco_license if coco_license is not None else COCO_License(
                url=
                'https://github.com/cm107/annotation_utils/blob/master/LICENSE',
                name='MIT License',
                id=len(dataset.licenses)))
        coco_license0 = dataset.licenses[-1]
        for linemod_image in self.images:
            file_name = get_filename(linemod_image.file_name)
            img_path = linemod_image.file_name if img_dir is None else f'{img_dir}/{file_name}'
            if file_exists(img_path):
                date_captured = get_ctime(img_path)
            else:
                if check_paths:
                    raise FileNotFoundError(
                        f"Couldn't find image at {img_path}")
                date_captured = ''
            coco_image = COCO_Image(license_id=coco_license0.id,
                                    file_name=file_name,
                                    coco_url=img_path,
                                    width=linemod_image.width,
                                    height=linemod_image.height,
                                    date_captured=date_captured,
                                    flickr_url=None,
                                    id=linemod_image.id)
            dataset.images.append(coco_image)

        pbar = tqdm(total=len(self.annotations),
                    unit='annotation(s)') if show_pbar else None
        if pbar is not None:
            pbar.set_description('Converting Linemod to COCO')
        for linemod_ann in self.annotations:
            mask_filename = get_filename(linemod_ann.mask_path)
            if mask_dir is not None:
                mask_path = f'{mask_dir}/{mask_filename}'
                if not file_exists(mask_path):
                    if check_paths:
                        raise FileNotFoundError(
                            f"Couldn't find mask at {mask_path}")
                    else:
                        seg = Segmentation()
                else:
                    seg = Segmentation.from_mask_path(mask_path,
                                                      lower_bgr=mask_lower_bgr,
                                                      upper_bgr=mask_upper_bgr)
            elif file_exists(linemod_ann.mask_path):
                seg = Segmentation.from_mask_path(linemod_ann.mask_path,
                                                  lower_bgr=mask_lower_bgr,
                                                  upper_bgr=mask_upper_bgr)
            elif img_dir is not None and file_exists(
                    f'{img_dir}/{mask_filename}'):
                seg = Segmentation.from_mask_path(f'{img_dir}/{mask_filename}',
                                                  lower_bgr=mask_lower_bgr,
                                                  upper_bgr=mask_upper_bgr)
            elif not check_paths:
                seg = Segmentation()
            else:
                raise FileNotFoundError(f"""
                    Couldn't resolve mask_path for calculating segmentation.
                    Please either specify mask_dir or correct the mask paths
                    in your linemod dataset.
                    linemod_ann.id: {linemod_ann.id}
                    linemod_ann.mask_path: {linemod_ann.mask_path}
                    """)
            if len(seg) > 0:
                bbox = seg.to_bbox()
            else:
                xmin = int(
                    linemod_ann.corner_2d.to_numpy(demarcation=True)[:,
                                                                     0].min())
                xmax = int(
                    linemod_ann.corner_2d.to_numpy(demarcation=True)[:,
                                                                     0].max())
                ymin = int(
                    linemod_ann.corner_2d.to_numpy(demarcation=True)[:,
                                                                     1].min())
                ymax = int(
                    linemod_ann.corner_2d.to_numpy(demarcation=True)[:,
                                                                     1].max())
                bbox = BBox(xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax)

            keypoints = Keypoint2D_List.from_point_list(linemod_ann.fps_2d,
                                                        visibility=2)
            keypoints_3d = Keypoint3D_List.from_point_list(linemod_ann.fps_3d,
                                                           visibility=2)
            num_keypoints = len(keypoints)

            if linemod_ann.category_id not in [
                    cat.id for cat in dataset.categories
            ]:
                linemod_cat = self.categories.get_obj_from_id(
                    linemod_ann.category_id)
                cat_keypoints = list(
                    'abcdefghijklmnopqrstuvwxyz'.upper())[:num_keypoints]
                cat_keypoints_idx_list = [
                    idx for idx in range(len(cat_keypoints))
                ]
                cat_keypoints_idx_list_shift_left = cat_keypoints_idx_list[
                    1:] + cat_keypoints_idx_list[:1]
                dataset.categories.append(
                    COCO_Category(
                        id=linemod_ann.category_id,
                        supercategory=linemod_cat.supercategory,
                        name=linemod_cat.name,
                        keypoints=cat_keypoints,
                        skeleton=[[start_idx, end_idx]
                                  for start_idx, end_idx in zip(
                                      cat_keypoints_idx_list,
                                      cat_keypoints_idx_list_shift_left)]))

            coco_ann = COCO_Annotation(
                id=linemod_ann.id,
                category_id=linemod_ann.category_id,
                image_id=linemod_ann.image_id,
                segmentation=seg,
                bbox=bbox,
                area=bbox.area(),
                keypoints=keypoints,
                num_keypoints=num_keypoints,
                iscrowd=0,
                keypoints_3d=keypoints_3d,
                camera=COCO_Camera(f=[linemod_ann.K.fx, linemod_ann.K.fy],
                                   c=[linemod_ann.K.cx, linemod_ann.K.cy],
                                   T=[0, 0]))
            dataset.annotations.append(coco_ann)
            if pbar is not None:
                pbar.update()
        if pbar is not None:
            pbar.close()
        return dataset
Example #16
    'pvnet-darwin20210105-epoch399',
    'pvnet-darwin20210105-epoch499',
    'pvnet-darwin20210105-epoch599',
    'pvnet-darwin20210105-epoch699',
    'pvnet-darwin20210105-epoch799',
]
test_root_dir = '/home/clayton/workspace/prj/data_keep/data/toyota/from_toyota/20201017/20201017_robot_camera'
csv_paths = recursively_get_all_filepaths_of_extension(test_root_dir,
                                                       extension='csv')
test_names, datasets = [], []
for csv_path in csv_paths:
    test_name = get_rootname_from_path(csv_path)
    img_dir = f'{get_dirpath_from_filepath(csv_path)}/images'
    assert dir_exists(img_dir), f"Couldn't find image directory: {img_dir}"
    ann_path = f'{img_dir}/output.json'
    if not file_exists(ann_path):
        continue
    dataset = COCO_Dataset.load_from_path(ann_path, img_dir=img_dir)
    test_names.append(test_name)
    datasets.append(dataset)

linemod_dataset = Linemod_Dataset.load_from_path(
    f'/home/clayton/workspace/prj/data/misc_dataset/darwin_datasets/coco2linemod/darwin20210105_blackout/train.json'
)
linemod_ann_sample = linemod_dataset.annotations[0]
kpt_3d = linemod_ann_sample.fps_3d.copy()
kpt_3d.append(linemod_ann_sample.center_3d)
corner_3d = linemod_ann_sample.corner_3d
# K = linemod_ann_sample.K
K = np.array([
    517.799858, 0.000000, 303.876287, 0.000000, 514.807834, 238.157119,
Example #17
def prepare_datasets_from_excel(
        xlsx_path: str,
        dst_root_dir: str,
        usecols: str = 'A:L',
        skiprows: int = None,
        skipfooter: int = 0,
        skip_existing: bool = False,
        val_target_proportion: float = 0.05,
        min_val_size: int = None,
        max_val_size: int = None,
        orig_config_save: str = 'orig.yaml',
        reorganized_config_save: str = 'dataset_config.yaml',
        show_pbar: bool = True):
    """
    Parameters:
        xlsx_path - Path to excel sheet that contains all of the information about where your datasets are located.
        dst_root_dir - Path to where you would like to save your prepared scenario datasets (split into train and val)
        usecols - Specify which columns you would like to parse from the excel sheet at xlsx_path. [Default: 'A:L']
        skiprows - Specify the number of rows from the top that you would like to skip when parsing the excel sheet. [Default: None]
        skipfooter - Specify the number of rows from the bottom that you would like to skip when parsing the excel sheet. [Default: 0]
        skip_existing - If you terminated dataset preparation midway, you can skip the scenarios that were already made using skip_existing=True. [Default: False]
        val_target_proportion - The proportion of your scenario that you would like to allocate to validation. [Default: 0.05]
        min_val_size - The minimum number of images that you would like to use for validation. [Default: None]
        max_val_size - The maximum number of images that you would like to use for validation. [Default: None]
        orig_config_save - Where you would like to save the dataset configuration representing your scenario_root_dir. [Default: 'orig.yaml']
        reorganized_config_save - Where you would like to save the dataset configuration representing your dst_root_dir. [Default: 'dataset_config.yaml']
        show_pbar - Whether or not you would like to show a progress bar during preparation. [Default: True]
    
    Description:
        The datasets specified in the excel sheet at xlsx_path will be combined and then split into a train + validation folder.
        Since the absolute paths of both image directories and annotation paths are parsed from the excel sheet, there is no need to place any restrictions
        on where each dataset needs to be located.

        The destination root directory will have the following structure:
            dst_root_dir
                scenario0
                    train
                    val
                scenario1
                    train
                    val
                scenario2
                    train
                    val
                ...

        The dataset configuration file saved at reorganized_config_save will reflect the directory structure of dst_root_dir.
        The configuration file representing the directory structure defined in your excel sheet is saved under orig_config_save.

        Note that orig_config_save and reorganized_config_save do not have to be inside of dst_root_dir;
        in fact, it is recommended that you keep them outside of dst_root_dir.
        It is also recommended that you change the paths of orig_config_save and reorganized_config_save every time you add to your datasets,
        since you will likely want to keep track of previous states of your dataset configuration and may want to
        roll back to a previous configuration at any time.
        (See the usage sketch after this function definition for an example call.)
    """
    # Parse Excel Sheet
    if not file_exists(xlsx_path):
        raise FileNotFoundError(f'File not found: {xlsx_path}')
    data_df = pd.read_excel(xlsx_path,
                            usecols=usecols,
                            skiprows=skiprows,
                            skipfooter=skipfooter)
    data_records = data_df.to_dict(orient='records')

    required_keys = [
        'Scenario Name', 'Dataset Name', 'Image Directory', 'Annotation Path'
    ]
    parsed_keys = list(data_records[0].keys())
    missing_keys = []
    for required_key in required_keys:
        if required_key not in parsed_keys:
            missing_keys.append(required_key)
    if len(missing_keys) > 0:
        raise KeyError(f"""
            Couldn't find the following required keys in the given excel sheet:
            missing_keys: {missing_keys}
            required_keys: {required_keys}
            parsed_keys: {parsed_keys}
            xlsx_path: {xlsx_path}

            Please check your excel sheet and script parameters and try again.
            Note: usecols, skiprows, and skipfooter affect which parts of the excel sheet are parsed.
            """)

    def is_empty_cell(info_dict: Dict[str, str],
                      key: str,
                      expected_type: type = str) -> bool:
        return not isinstance(info_dict[key], expected_type) and math.isnan(
            info_dict[key])

    collection_handler = DatasetConfigCollectionHandler()
    current_scenario_name = None
    working_config_list = cast(List[DatasetConfig], [])
    pbar = tqdm(total=len(data_records), unit='item(s)') if show_pbar else None
    if pbar is not None:
        pbar.set_description('Parsing Excel Sheet')
    for info_dict in data_records:
        for required_cell_key in [
                'Dataset Name', 'Image Directory', 'Annotation Path'
        ]:
            if is_empty_cell(info_dict,
                             key=required_cell_key,
                             expected_type=str):
                raise ValueError(f"""
                    Encountered empty cell under {required_cell_key}.
                    Row Dictionary: {info_dict}
                    xlsx_path: {xlsx_path}
                    Please check your excel sheet.
                    """)
        assert 'Scenario Name' in info_dict
        scenario_name = info_dict['Scenario Name'] \
            if 'Scenario Name' in info_dict and not is_empty_cell(info_dict, key='Scenario Name', expected_type=str) \
            else None
        dataset_name = info_dict['Dataset Name']
        img_dir = info_dict['Image Directory']
        ann_path = info_dict['Annotation Path']
        if scenario_name is not None:
            if len(working_config_list) > 0:
                collection = DatasetConfigCollection(working_config_list,
                                                     tag=current_scenario_name)
                collection_handler.append(collection)
                working_config_list = []
            current_scenario_name = scenario_name
        config = DatasetConfig(img_dir=img_dir,
                               ann_path=ann_path,
                               ann_format='coco',
                               tag=dataset_name)
        working_config_list.append(config)
        if pbar is not None:
            pbar.update()
    if len(working_config_list) > 0:
        collection = DatasetConfigCollection(working_config_list,
                                             tag=current_scenario_name)
        collection_handler.append(collection)
        working_config_list = []
    if pbar is not None:
        pbar.close()
    collection_handler.save_to_path(orig_config_save, overwrite=True)

    # Combine Datasets
    train_collection = DatasetConfigCollection(tag='train')
    val_collection = DatasetConfigCollection(tag='val')

    make_dir_if_not_exists(dst_root_dir)
    pbar = tqdm(total=len(collection_handler),
                unit='scenario(s)') if show_pbar else None
    if pbar is not None:
        pbar.set_description('Combining Scenarios')
    for collection in collection_handler:
        scenario_root_dir = f'{dst_root_dir}/{collection.tag}'
        make_dir_if_not_exists(scenario_root_dir)
        scenario_train_dir = f'{scenario_root_dir}/train'
        make_dir_if_not_exists(scenario_train_dir)
        scenario_val_dir = f'{scenario_root_dir}/val'
        make_dir_if_not_exists(scenario_val_dir)

        if (not file_exists(f'{scenario_train_dir}/output.json')
                or not file_exists(f'{scenario_val_dir}/output.json')
            ) or not skip_existing:
            combined_dataset = COCO_Dataset.combine_from_config(
                collection, img_sort_attr_name='file_name', show_pbar=False)
            orig_num_images = len(combined_dataset.images)
            assert orig_num_images >= 2, f'{collection.tag} has only {orig_num_images} images, and thus cannot be split into train and val.'
            num_val = int(len(combined_dataset.images) * val_target_proportion)
            num_val = 1 if num_val == 0 else num_val
            num_val = min_val_size if min_val_size is not None and num_val < min_val_size else num_val
            num_val = max_val_size if max_val_size is not None and num_val > max_val_size else num_val
            num_train = orig_num_images - num_val
            train_dataset, val_dataset = combined_dataset.split_into_parts(
                ratio=[num_train, num_val], shuffle=True)

            train_dataset.move_images(dst_img_dir=scenario_train_dir,
                                      preserve_filenames=False,
                                      update_img_paths=True,
                                      overwrite=True,
                                      show_pbar=False)
            train_dataset.save_to_path(f'{scenario_train_dir}/output.json',
                                       overwrite=True)
            train_collection.append(
                DatasetConfig(img_dir=scenario_train_dir,
                              ann_path=f'{scenario_train_dir}/output.json',
                              tag=f'{collection.tag}_train'))

            val_dataset.move_images(dst_img_dir=scenario_val_dir,
                                    preserve_filenames=False,
                                    update_img_paths=True,
                                    overwrite=True,
                                    show_pbar=False)
            val_dataset.save_to_path(f'{scenario_val_dir}/output.json',
                                     overwrite=True)
            val_collection.append(
                DatasetConfig(img_dir=scenario_val_dir,
                              ann_path=f'{scenario_val_dir}/output.json',
                              tag=f'{collection.tag}_val'))
        else:
            train_dataset = COCO_Dataset.load_from_path(
                f'{scenario_train_dir}/output.json',
                img_dir=f'{scenario_train_dir}')
            train_collection.append(
                DatasetConfig(img_dir=scenario_train_dir,
                              ann_path=f'{scenario_train_dir}/output.json',
                              tag=f'{collection.tag}_train'))
            val_dataset = COCO_Dataset.load_from_path(
                f'{scenario_val_dir}/output.json',
                img_dir=f'{scenario_val_dir}')
            val_collection.append(
                DatasetConfig(img_dir=scenario_val_dir,
                              ann_path=f'{scenario_val_dir}/output.json',
                              tag=f'{collection.tag}_val'))
        if pbar is not None:
            pbar.update()
    if pbar is not None:
        pbar.close()

    organized_collection_handler = DatasetConfigCollectionHandler(
        [train_collection, val_collection])
    organized_collection_handler.save_to_path(reorganized_config_save,
                                              overwrite=True)
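
A minimal usage sketch for prepare_datasets_from_excel, based on the docstring above. This is an illustration only: the xlsx_path and dst_root_dir values are hypothetical placeholders, and the Excel sheet is assumed to contain the required 'Scenario Name', 'Dataset Name', 'Image Directory', and 'Annotation Path' columns.

# Hypothetical usage sketch; the paths below are placeholders, not taken from the original source.
prepare_datasets_from_excel(
    xlsx_path='/path/to/dataset_overview.xlsx',      # sheet listing scenarios, datasets, image dirs, and annotation paths
    dst_root_dir='/path/to/prepared_scenarios',      # each scenario gets train/ and val/ folders under this directory
    val_target_proportion=0.05,                      # roughly 5% of each scenario's images go to validation
    min_val_size=1,                                  # never allocate fewer than one image to validation
    skip_existing=True,                              # reuse existing train/val splits instead of recombining them
    orig_config_save='orig.yaml',                    # config mirroring the directory structure defined in the excel sheet
    reorganized_config_save='dataset_config.yaml',   # config mirroring the reorganized dst_root_dir structure
    show_pbar=True)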
Example #18
from common_utils.file_utils import file_exists
import cv2
from common_utils.common_types.keypoint import Keypoint2D_List, Keypoint2D

from imageaug import AugHandler, Augmenter as aug

dataset = COCO_Dataset.load_from_path(
    json_path=
    '/Users/darwinharianto/Desktop/hayashida/Unreal/18_03_2020_18_03_10_coco-data/HSR-coco.json',
    img_dir=
    '/Users/darwinharianto/Desktop/hayashida/Unreal/18_03_2020_18_03_10_coco-data'
)

resize_save_path = 'test_resize.json'
handler_save_path = 'test_handler.json'
if not file_exists(resize_save_path):
    resize = aug.Resize(width=500, height=500)
    resize.save_to_path(save_path=resize_save_path, overwrite=True)
    logger.info(f'Created new Resize save.')
else:
    resize = aug.Resize.load_from_path(resize_save_path)
    logger.info(f'Loaded Resize from save.')
if not file_exists(handler_save_path):
    handler = AugHandler([
        aug.Crop(percent=[0.2, 0.5]),
        aug.Flipud(p=0.5),
        aug.Superpixels()
        # aug.Sharpen(alpha=[-1,0.1], lightness=[0,3])
    ])
    handler.save_to_path(save_path=handler_save_path, overwrite=True)
    logger.info(f'Created new AugHandler save.')
Example #19
        def _wrapper_inner(*args, **kwargs):
            # Check/Adjust Parameters
            if isinstance(weight_path, (str, dict)):
                weight_paths = [weight_path]
            elif isinstance(weight_path, (tuple, list)):
                assert all([type(part) in [str, dict] for part in weight_path])
                for part in weight_path:
                    if isinstance(part, dict):
                        for key, val in part.items():
                            assert isinstance(val, str)
                weight_paths = weight_path
            else:
                raise TypeError
            if isinstance(model_name, str):
                model_names = [model_name]
            elif isinstance(model_name, (tuple, list)):
                assert all([type(part) is str for part in model_name])
                model_names = model_name
            else:
                raise TypeError
            assert len(weight_paths) == len(model_names)
            if isinstance(dataset, COCO_Dataset):
                datasets = [dataset]
            elif isinstance(dataset, (tuple, list)):
                assert all(
                    [isinstance(part, COCO_Dataset) for part in dataset])
                datasets = dataset
            else:
                raise TypeError
            if isinstance(test_name, str):
                test_names = [test_name]
            elif isinstance(test_name, (tuple, list)):
                assert all([type(part) is str for part in test_name])
                test_names = test_name
            else:
                raise TypeError
            assert len(datasets) == len(test_names)

            # Prepare Dump Directory
            if data_dump_dir is not None:
                make_dir_if_not_exists(data_dump_dir)
                # delete_all_files_in_dir(data_dump_dir, ask_permission=True)
            if video_dump_dir is not None:
                make_dir_if_not_exists(video_dump_dir)
                # delete_all_files_in_dir(video_dump_dir, ask_permission=True)
            if img_dump_dir is not None:
                make_dir_if_not_exists(img_dump_dir)
                # delete_all_files_in_dir(img_dump_dir, ask_permission=True)
            stream_writer = cast(StreamWriter, None)

            # Accumulate/Save Inference Data On Tests
            total_images = sum([len(dataset.images) for dataset in datasets])
            test_pbar = tqdm(total=total_images * len(model_names),
                             unit='image(s)',
                             leave=True) if show_pbar else None
            reserved_params = [
                'weight_path', 'model_name', 'dataset', 'test_name',
                'accumulate_pred_dump', 'stream_writer',
                'leave_stream_writer_open'
            ]
            for param in reserved_params:
                assert param not in kwargs, f'{param} already exists in kwargs'
                assert param in infer_func.__annotations__, f"{infer_func.__name__} needs to accept a {param} keyword argument to be wrapped by infer_tests_wrapper"
            for weight_path0, model_name0 in zip(weight_paths, model_names):
                video_save_path = f'{video_dump_dir}/{model_name0}.avi' if video_dump_dir is not None else None
                data_dump_save = f'{data_dump_dir}/{model_name0}.json' if data_dump_dir is not None else None
                if data_dump_save is not None and file_exists(
                        data_dump_save) and skip_if_data_dump_exists:
                    if test_pbar is not None:
                        for dataset0, test_name0 in zip(datasets, test_names):
                            test_pbar.update(len(dataset0.images))
                    continue
                if stream_writer is None:
                    stream_writer = StreamWriter(
                        show_preview=show_preview,
                        video_save_path=video_save_path,
                        dump_dir=img_dump_dir)
                elif video_save_path is not None:
                    stream_writer.video_writer._save_path = video_save_path
                if img_dump_dir is not None:
                    model_img_dump_dir = f'{img_dump_dir}/{model_name0}'
                    make_dir_if_not_exists(model_img_dump_dir)
                else:
                    model_img_dump_dir = None
                data = handler_constructor()
                assert isinstance(data, BasicLoadableHandler)
                assert hasattr(data, '__add__')
                # if video_dump_dir is not None:
                #     video_save_path = f'{video_dump_dir}/{model_name0}.avi'
                # else:
                #     video_save_path = None
                for dataset0, test_name0 in zip(datasets, test_names):
                    if test_pbar is not None:
                        test_pbar.set_description(
                            f'{model_name0} {test_name0}')
                    if img_dump_dir is not None:
                        test_img_dump_dir = f'{model_img_dump_dir}/{test_name0}'
                        make_dir_if_not_exists(test_img_dump_dir)
                        stream_writer.dump_writer._save_dir = test_img_dump_dir
                    kwargs['weight_path'] = weight_path0
                    kwargs['model_name'] = model_name0
                    kwargs['dataset'] = dataset0
                    kwargs['test_name'] = test_name0
                    kwargs['accumulate_pred_dump'] = data_dump_dir is not None
                    kwargs['stream_writer'] = stream_writer
                    kwargs['leave_stream_writer_open'] = True
                    if data_dump_dir is not None:
                        data0 = infer_func(*args, **kwargs)
                        assert isinstance(
                            data0, handler_constructor
                        ), f"Encountered dump data of type {type(data0).__name__}. Expected {handler_constructor.__name__}."
                        data += data0
                    else:
                        infer_func(*args, **kwargs)
                    if test_pbar is not None:
                        test_pbar.update(len(dataset0.images))
                if data_dump_dir is not None:
                    data.save_to_path(data_dump_save, overwrite=True)
                if stream_writer is not None and stream_writer.video_writer is not None and stream_writer.video_writer.recorder is not None:
                    stream_writer.video_writer.recorder.close()
                    stream_writer.video_writer.recorder = None
            if test_pbar is not None:
                test_pbar.close()
            if stream_writer is not None:
                del stream_writer
Example #20
 def load_from_txt(self, load_path: str) -> LinemodCamera:
     if not file_exists(load_path):
         raise FileNotFoundError(f"Couldn't find file at {load_path}")
     mat = np.loadtxt(load_path)
     return LinemodCamera.from_matrix(mat)
Example #21
def gen_infer_comparison(gt: BasicLoadableHandler,
                         dt: BasicLoadableHandler,
                         error: BasicLoadableHandler,
                         model_names: List[str],
                         test_names: List[str],
                         collage_shape: Tuple[int, int],
                         test_img_dir_map: Dict[str, str] = None,
                         model_aliases: Dict[str, str] = None,
                         test_aliases: Dict[str, str] = None,
                         video_save: str = None,
                         img_dump_dir: str = None,
                         show_preview: bool = False,
                         show_pbar: bool = True,
                         draw_settings=None,
                         draw_inference: bool = False,
                         details_func=None,
                         debug_verbose: bool = False):
    for handler in [gt, dt, error]:
        assert isinstance(handler, BasicLoadableHandler)
        for attr_key in ['frame', 'test_name']:
            assert hasattr(handler[0], attr_key)
    for handler in [dt, error]:
        assert hasattr(handler[0], 'model_name')
    model_names0 = list(set([datum.model_name for datum in dt
                             ])) if model_names == 'all' else model_names
    test_names0 = list(set([datum.test_name for datum in gt
                            ])) if test_names == 'all' else test_names
    for val_list in [model_names0, test_names0]:
        if val_list != 'all':
            assert isinstance(val_list, (tuple, list))
            for val in val_list:
                assert isinstance(val, str)
    assert isinstance(collage_shape, (tuple, list))
    for val in collage_shape:
        assert isinstance(val, int)
    assert len(collage_shape) == 2
    assert len(model_names0) <= collage_shape[0] * collage_shape[1]
    if img_dump_dir is not None:
        make_dir_if_not_exists(img_dump_dir)
        delete_all_files_in_dir(img_dump_dir, ask_permission=False)
    if test_img_dir_map is None:
        test_img_dir_map0 = {test_name: test_name for test_name in test_names0}
    else:
        assert isinstance(test_img_dir_map, dict)
        for key, val in test_img_dir_map.items():
            assert key in test_names0
            assert isinstance(key, str)
            assert isinstance(val, str)
        test_img_dir_map0 = {
            test_name: (test_img_dir_map[test_name] if test_name in test_img_dir_map else test_name) \
            for test_name in test_names0
        }
    for test_name, img_dir in test_img_dir_map0.items():
        if not dir_exists(img_dir):
            raise FileNotFoundError(f"""
                Couldn't find image directory {img_dir} for {test_name}.
                Please modify test_img_dir_map to match the image directory path for {test_name}.
                test_img_dir_map: {test_img_dir_map0}
                """)
    stream_writer = StreamWriter(show_preview=show_preview,
                                 video_save_path=video_save,
                                 dump_dir=img_dump_dir)

    total_images = len(gt.get(test_name=test_names0))
    pbar = tqdm(total=total_images, unit='image(s)',
                leave=True) if show_pbar else None
    if pbar is not None:
        pbar.set_description('Generating Comparison')
    for test_name in test_names0:
        if img_dump_dir is not None:
            test_img_dump_dir = f'{img_dump_dir}/{test_name}'
            make_dir_if_not_exists(test_img_dump_dir)
            stream_writer.dump_writer._save_dir = test_img_dump_dir

        img_dir = test_img_dir_map0[test_name]
        gt_test_data = gt.get(test_name=test_name)
        gt_test_data.sort(attr_name='frame')
        dt_test_data = dt.get(test_name=test_name)
        dt_test_data.sort(attr_name='frame')
        error_test_data = error.get(test_name=test_name)
        error_test_data.sort(attr_name='frame')

        for gt_datum in gt_test_data:
            file_name = gt_datum.frame
            img_path = f'{img_dir}/{file_name}'
            if not file_exists(img_path):
                if debug_verbose:
                    print(f"""
                        Couldn't find image. Skipping.
                            test_name: {test_name}
                            img_path: {img_path}
                        """)
                if pbar is not None:
                    pbar.update()
                continue
            img = cv2.imread(img_path)
            dt_frame_data = dt_test_data.get(frame=gt_datum.frame)
            error_frame_data = error_test_data.get(frame=gt_datum.frame)

            img_buffer = cast(List[np.ndarray], [])
            for model_name in model_names0:
                dt_model_data = dt_frame_data.get(model_name=model_name)
                dt_model_datum = dt_model_data[0] if len(
                    dt_model_data) > 0 else None
                error_model_data = error_frame_data.get(model_name=model_name)
                error_datum = error_model_data[0] if len(
                    error_model_data) > 0 else None

                result = img.copy()
                if draw_inference or draw_settings is not None:
                    if draw_settings is not None:
                        if dt_model_datum is not None:
                            result = dt_model_datum.draw(
                                result, settings=draw_settings)
                    else:
                        if dt_model_datum is not None:
                            result = dt_model_datum.draw(result)

                if test_aliases is not None and gt_datum.test_name in test_aliases:
                    test_text = test_aliases[gt_datum.test_name]
                else:
                    test_text = gt_datum.test_name if gt_datum is not None else None
                if model_aliases is not None and dt_model_datum is not None and dt_model_datum.model_name in model_aliases:
                    model_text = model_aliases[dt_model_datum.model_name]
                else:
                    model_text = dt_model_datum.model_name if dt_model_datum is not None else None

                if details_func is not None:
                    for key in ['gt', 'dt', 'error']:
                        assert key in details_func.__annotations__, f'{details_func.__name__} must have a {key} parameter.'
                    details_func_params = {
                        'img': result,
                        'gt': gt_datum,
                        'dt': dt_model_datum,
                        'error': error_datum
                    }
                    suggested_params = {
                        'test_text': test_text,
                        'model_text': model_text,
                        'frame_text': gt_datum.frame
                    }
                    for key, val in suggested_params.items():
                        if key in details_func.__annotations__:
                            details_func_params[key] = val
                    result = details_func(**details_func_params)
                else:
                    row_text_list = [
                        f'Test: {test_text}', f'Model: {model_text}',
                        f'Frame: {gt_datum.frame}'
                    ]
                    result_h, result_w = result.shape[:2]
                    combined_row_height = len(row_text_list) * 0.04 * result_h
                    result = draw_text_rows_at_point(
                        img=result,
                        row_text_list=row_text_list,
                        x=result_w * 0.01,
                        y=result_h * 0.01,
                        combined_row_height=combined_row_height)
                img_buffer.append(result)
            collage_img = collage_from_img_buffer(img_buffer=img_buffer,
                                                  collage_shape=collage_shape)
            stream_writer.step(img=collage_img, file_name=file_name)
            if pbar is not None:
                pbar.update()
    if pbar is not None:
        pbar.close()
Example #22
from annotation_utils.coco.structs import COCO_Dataset
from common_utils.file_utils import file_exists

ann_save_path = 'preview_dataset.json'
if not file_exists(ann_save_path):
    dataset = COCO_Dataset.combine_from_config(
        config_path=
        '/home/clayton/workspace/prj/data_keep/data/toyota/dataset/config/yaml/box_hsr_kpt_trainval.yaml',
        img_sort_attr_name='file_name',
        show_pbar=True)
    dataset.save_to_path(ann_save_path)
else:
    dataset = COCO_Dataset.load_from_path(json_path=ann_save_path)
dataset.display_preview(kpt_idx_offset=-1, show_details=True)
Example #23
linemod_ann_sample = linemod_dataset.annotations[0]
kpt_3d = linemod_ann_sample.fps_3d.copy()
kpt_3d.append(linemod_ann_sample.center_3d)
corner_3d = linemod_ann_sample.corner_3d
K = linemod_ann_sample.K
linemod_image_sample = linemod_dataset.images[0]
dsize = (linemod_image_sample.width, linemod_image_sample.height)

weights_dir = '/home/clayton/workspace/git/clean-pvnet/data/model/pvnet/custom'
weight_path_list = get_all_files_of_extension(weights_dir, 'pth')
weight_path_list.sort()
infer_data_dump_dir = '/home/clayton/workspace/prj/data_keep/data/toyota/from_toyota/20201017/20201017_robot_camera/infer_dump'
make_dir_if_not_exists(infer_data_dump_dir)
# delete_all_files_in_dir(infer_data_dump_dir, ask_permission=True)
weights_pbar = tqdm(total=len(weight_path_list), unit='weight(s)')
for weight_path in weight_path_list:
    rootname = get_rootname_from_path(weight_path)
    weights_pbar.set_description(rootname)
    pred_dump_path = f'{infer_data_dump_dir}/{rootname}.json'
    if file_exists(pred_dump_path):
        weights_pbar.update()
        continue
    inferer = PVNetInferer(weight_path=weight_path)
    inferer.infer_coco_dataset(dataset=coco_dataset,
                               kpt_3d=kpt_3d,
                               corner_3d=corner_3d,
                               K=K,
                               blackout=True,
                               dsize=dsize,
                               pred_dump_path=pred_dump_path)
    weights_pbar.update()
Example #24
    def move(self,
             dst_dataroot: str,
             include_depth: bool = True,
             include_RT: bool = False,
             camera_path: str = None,
             fps_path: str = None,
             preserve_filename: bool = False,
             use_softlink: bool = False,
             ask_permission_on_delete: bool = True,
             show_pbar: bool = True):
        make_dir_if_not_exists(dst_dataroot)
        delete_all_files_in_dir(dst_dataroot,
                                ask_permission=ask_permission_on_delete,
                                verbose=False)
        processed_image_id_list = []
        pbar = tqdm(total=len(self.annotations),
                    unit='annotation(s)',
                    leave=True) if show_pbar else None
        if pbar is not None:
            pbar.set_description('Moving Linemod Dataset Data')
        for linemod_ann in self.annotations:
            if not dir_exists(linemod_ann.data_root):
                raise FileNotFoundError(
                    f"Couldn't find data_root at {linemod_ann.data_root}")

            # Images
            linemod_image = self.images.get(id=linemod_ann.image_id)[0]
            if linemod_image.id not in processed_image_id_list:
                img_path = f'{linemod_ann.data_root}/{get_filename(linemod_image.file_name)}'
                if not file_exists(img_path):
                    raise FileNotFoundError(
                        f"Couldn't find image at {img_path}")
                if preserve_filename:
                    dst_img_path = f'{dst_dataroot}/{get_filename(linemod_image.file_name)}'
                    if file_exists(dst_img_path):
                        raise FileExistsError(f"""
                            Image already exists at {dst_img_path}
                            Hint: Use preserve_filename=False to bypass this error.
                            """)
                else:
                    dst_filename = f'{linemod_image.id}.{get_extension_from_filename(linemod_image.file_name)}'
                    linemod_image.file_name = dst_filename
                    dst_img_path = f'{dst_dataroot}/{dst_filename}'
                if not use_softlink:
                    copy_file(src_path=img_path,
                              dest_path=dst_img_path,
                              silent=True)
                else:
                    create_softlink(src_path=rel_to_abs_path(img_path),
                                    dst_path=rel_to_abs_path(dst_img_path))
                processed_image_id_list.append(linemod_image.id)

            # Masks
            if not file_exists(linemod_ann.mask_path):
                raise FileNotFoundError(
                    f"Couldn't find mask at {linemod_ann.mask_path}")
            mask_path = linemod_ann.mask_path
            if preserve_filename:
                dst_mask_path = f'{dst_dataroot}/{get_filename(linemod_ann.mask_path)}'
                if file_exists(dst_mask_path):
                    raise FileExistsError(f"""
                        Mask already exists at {dst_mask_path}
                        Hint: Use preserve_filename=False to bypass this error.
                        """)
            else:
                mask_filename = get_filename(linemod_ann.mask_path)
                dst_filename = f'{linemod_ann.id}_mask.{get_extension_from_filename(mask_filename)}'
                dst_mask_path = f'{dst_dataroot}/{dst_filename}'
                linemod_ann.mask_path = dst_mask_path
            if not use_softlink:
                copy_file(src_path=mask_path,
                          dest_path=dst_mask_path,
                          silent=True)
            else:
                create_softlink(src_path=rel_to_abs_path(mask_path),
                                dst_path=rel_to_abs_path(dst_mask_path))

            # Depth
            if include_depth and linemod_ann.depth_path is not None:
                if not file_exists(linemod_ann.depth_path):
                    raise FileNotFoundError(
                        f"Couldn't find depth at {linemod_ann.depth_path}")
                depth_path = linemod_ann.depth_path
                if preserve_filename:
                    dst_depth_path = f'{dst_dataroot}/{get_filename(linemod_ann.depth_path)}'
                    if file_exists(dst_depth_path):
                        raise FileExistsError(f"""
                            Depth already exists at {dst_depth_path}
                            Hint: Use preserve_filename=False to bypass this error.
                            """)
                else:
                    depth_filename = get_filename(linemod_ann.depth_path)
                    dst_filename = f'{linemod_ann.id}_depth.{get_extension_from_filename(depth_filename)}'
                    dst_depth_path = f'{dst_dataroot}/{dst_filename}'
                    linemod_ann.depth_path = dst_depth_path
                if not use_softlink:
                    copy_file(src_path=depth_path,
                              dest_path=dst_depth_path,
                              silent=True)
                else:
                    create_softlink(src_path=rel_to_abs_path(depth_path),
                                    dst_path=rel_to_abs_path(dst_depth_path))

            # RT pickle files
            if include_RT:
                rootname = get_rootname_from_path(mask_path)
                if rootname.endswith('_mask'):
                    rootname = rootname.replace('_mask', '')
                rt_filename = f'{rootname}_RT.pkl'
                rt_path = f'{linemod_ann.data_root}/{rt_filename}'
                if not file_exists(rt_path):
                    raise FileNotFoundError(
                        f"Couldn't find RT pickle file at {rt_path}")
                if preserve_filename:
                    dst_rt_path = f'{dst_dataroot}/{rt_filename}'
                    if file_exists(dst_rt_path):
                        raise FileExistsError(f"""
                            RT pickle file already exists at {dst_rt_path}
                            Hint: Use preserve_filename=False to bypass this error.
                            """)
                else:
                    dst_rt_filename = f'{linemod_ann.id}_RT.pkl'
                    dst_rt_path = f'{dst_dataroot}/{dst_rt_filename}'
                if not use_softlink:
                    copy_file(src_path=rt_path,
                              dest_path=dst_rt_path,
                              silent=True)
                else:
                    create_softlink(src_path=rel_to_abs_path(rt_path),
                                    dst_path=rel_to_abs_path(dst_rt_path))
            if pbar is not None:
                pbar.update()
        # Camera setting
        if camera_path is not None:
            if not file_exists(camera_path):
                raise FileNotFoundError(
                    f"Couldn't find camera settings at {camera_path}")
            dst_camera_path = f'{dst_dataroot}/{get_filename(camera_path)}'
            if file_exists(dst_camera_path):
                raise FileExistsError(
                    f'Camera settings already saved at {dst_camera_path}')
            if not use_softlink:
                copy_file(src_path=camera_path,
                          dest_path=dst_camera_path,
                          silent=True)
            else:
                create_softlink(src_path=rel_to_abs_path(camera_path),
                                dst_path=rel_to_abs_path(dst_camera_path))

        # FPS setting
        if fps_path is not None:
            if not file_exists(fps_path):
                raise FileNotFoundError(
                    f"Couldn't find FPS settings at {fps_path}")
            dst_fps_path = f'{dst_dataroot}/{get_filename(fps_path)}'
            if file_exists(dst_fps_path):
                raise FileExistsError(
                    f'FPS settings already saved at {dst_fps_path}')
            if not use_softlink:
                copy_file(src_path=fps_path,
                          dest_path=dst_fps_path,
                          silent=True)
            else:
                create_softlink(src_path=rel_to_abs_path(fps_path),
                                dst_path=rel_to_abs_path(dst_fps_path))
        if pbar is not None:
            pbar.close()
from common_utils.file_utils import file_exists
from annotation_utils.coco.structs import COCO_Dataset, COCO_Category_Handler, COCO_Category
from annotation_utils.labelme.structs import LabelmeAnnotationHandler

# Define Labelme Directory Paths
img_dir = '/path/to/labelme/img/dir'
json_dir = '/path/to/labelme/json/dir'

# Load Labelme Handler
labelme_handler = LabelmeAnnotationHandler.load_from_dir(load_dir=json_dir)

# Define COCO Categories Before Conversion
if not file_exists('categories_example.json'): # Save a new categories json if it doesn't already exist.
    categories = COCO_Category_Handler()
    categories.append( # Standard Keypoint Example
        COCO_Category(
            id=len(categories),
            supercategory='pet',
            name='dog',
            keypoints=[ # The keypoint labels are defined here
                'left_eye', 'right_eye', # 0, 1
                'mouth_left', 'mouth_center', 'mouth_right' # 2, 3, 4
            ],
            skeleton=[ # The connections between keypoints are defined with indices here
                [0, 1],
                [2, 3], [3,4]
            ]
        )
    )
    categories.append( # Simple Keypoint Example
        COCO_Category.from_label_skeleton(
Example #26
from annotation_utils.ndds.structs import NDDS_Dataset
from annotation_utils.coco.structs import COCO_Category_Handler, COCO_Dataset
from common_utils.file_utils import file_exists

result_json = 'conv_test.json'

if not file_exists(result_json):
    # Load NDDS Dataset
    ndds_dataset = NDDS_Dataset.load_from_dir(
        json_dir='/home/clayton/workspace/prj/data_keep/data/ndds/mv_500',
        show_pbar=True)

    # Fix NDDS Dataset naming so that it follows convention. (This is not necessary if the NDDS dataset already follows the naming convention.)
    number_spelling_map = {
        'zero': 0,
        'one': 1,
        'two': 2,
        'three': 3,
        'four': 4,
        'five': 5,
        'six': 6,
        'seven': 7,
        'eight': 8,
        'nine': 9
    }

    for frame in ndds_dataset.frames:
        # Fix Naming Convention
        for ann_obj in frame.ndds_ann.objects:
            # Note: Part numbers should be specified in the obj_type string.
            if ann_obj.class_name == 'measure':