Esempio n. 1
0
def run(
    path: str,
    key: str = 'measure',
    show_image: bool = False,
    show_video: bool = True,
):
    """Strip keypoint/segmentation data from a COCO dataset and visualize it.

    Loads '{path}/{key}-coco.json' non-strictly, blanks every annotation's
    segmentation/keypoint fields and every category's keypoint metadata,
    round-trips the result through 'non_strict_dataset.json', then renders
    an image dump and/or a preview video depending on the flags.

    NOTE(review): `dt_string3` and `logger` are expected to be defined
    elsewhere in this file.
    """
    ds = COCO_Dataset.load_from_path(json_path=f'{path}/{key}-coco.json',
                                     img_dir=path,
                                     strict=False)

    # Blank out all keypoint/segmentation info on every annotation.
    for ann in ds.annotations:
        ann.segmentation = Segmentation()
        ann.keypoints = Keypoint2D_List()
        ann.num_keypoints = 0
        ann.keypoints_3d = None
        ann.camera = None
    # Categories must agree with the stripped annotations: no keypoint
    # names, no skeleton edges.
    for cat in ds.categories:
        cat.keypoints = []
        cat.skeleton = []

    ds.save_to_path('non_strict_dataset.json', overwrite=True, strict=False)

    # Reload the stripped dataset and sort frames for a stable output order.
    reloaded = COCO_Dataset.load_from_path('non_strict_dataset.json',
                                           strict=False)
    reloaded.images.sort(attr_name='file_name')

    if show_image:
        reloaded.save_visualization(save_dir=f'{path}_{dt_string3}_visualize',
                                    show_preview=True,
                                    kpt_idx_offset=-1,
                                    overwrite=True,
                                    show_details=True)
    if show_video:
        reloaded.save_video(save_path=f'{path}_{dt_string3}.mp4',
                            show_details=True,
                            show_preview=True,
                            kpt_idx_offset=-1,
                            overwrite=True,
                            fps=5,
                            show_seg=True)
    logger.green('Visualisation complete')
Esempio n. 2
0
def combine_dataset_from_config_file(config_path: str, dest_folder_img: str,
                                     dest_json_file: str):
    """Merge the datasets listed in a config file into one COCO dataset.

    The combined images are relocated into dest_folder_img (renamed to
    avoid collisions) and the merged annotation file is written to
    dest_json_file.
    """
    merged = COCO_Dataset.combine_from_config(config_path=config_path,
                                              img_sort_attr_name='file_name',
                                              show_pbar=True)
    # Renaming (preserve_filenames=False) avoids clashes between source
    # datasets; annotation image paths are rewritten to match.
    merged.move_images(dst_img_dir=dest_folder_img,
                       preserve_filenames=False,
                       update_img_paths=True,
                       overwrite=True,
                       show_pbar=True)
    merged.save_to_path(save_path=dest_json_file, overwrite=True)
def coco_json_fixer(coco_img_dir: str = None,
                    fix_status: bool = True) -> "str | None":
    """Re-save a COCO annotation file in strict mode to normalize it.

    Loads ``coco_annotations.json`` from ``coco_img_dir`` non-strictly,
    writes it back strictly as ``fixed_coco_annotations.json``, then runs
    ``seg_filter`` on the result.

    Args:
        coco_img_dir: Directory containing ``coco_annotations.json`` and
            the dataset images. Must not be None when ``fix_status`` is True.
        fix_status: When False the function does nothing.

    Returns:
        Path to the fixed annotation file, or None when ``fix_status`` is
        False. (The original fell through implicitly while being annotated
        ``-> str``; the None return is now explicit and the annotation
        corrected.)
    """
    if not fix_status:
        return None
    # Fix coco: strict re-save drops/normalizes malformed fields.
    org_coco_file = os.path.join(coco_img_dir, "coco_annotations.json")
    chk_file_exists(org_coco_file)
    fixed_coco_file = os.path.join(coco_img_dir,
                                   "fixed_coco_annotations.json")
    dataset = COCO_Dataset.load_from_path(org_coco_file,
                                          strict=False,
                                          img_dir=coco_img_dir)
    dataset.save_to_path(fixed_coco_file, strict=True, overwrite=True)
    seg_filter(fixed_coco_file)
    return fixed_coco_file
Esempio n. 4
0
from annotation_utils.coco.structs import COCO_Dataset

# Grow every bounding box by 30% about its center (clamped to the image
# frame), save the result, and preview it.
dataset = COCO_Dataset.load_from_path(
    json_path=
    '/home/clayton/workspace/prj/data_keep/data/sekisui/hook/coco/output.json',
    img_dir='/home/clayton/workspace/prj/data_keep/data/sekisui/hook/img')

for img in dataset.images:
    img_anns = dataset.annotations.get_annotations_from_imgIds([img.id])
    for img_ann in img_anns:
        img_ann.bbox = img_ann.bbox.scale_about_center(
            scale_factor=1.3, frame_shape=[img.height, img.width])

dataset.save_to_path('bbox_resized.json', overwrite=True)
dataset.display_preview(show_details=True, kpt_idx_offset=-1)
Esempio n. 5
0
from logger import logger
from annotation_utils.coco.structs import COCO_Dataset

# Prune annotations with too few keypoints, relocate the surviving images,
# and log handler sizes before/after each mutation step.
dataset = COCO_Dataset.load_from_path(
    json_path='bk_28_02_2020_11_18_30_coco-data/HSR-coco.json',
    img_dir='bk_28_02_2020_11_18_30_coco-data')


def _report(flag: int):
    # Log image/annotation counts so the effect of each step is visible.
    logger.purple(
        f'Flag{flag} len(dataset.images): {len(dataset.images)}, len(dataset.annotations): {len(dataset.annotations)}'
    )


_report(0)
dataset.prune_keypoints(min_num_kpts=11, verbose=True)
_report(1)
dataset.move_images(dst_img_dir='test_img',
                    preserve_filenames=True,
                    update_img_paths=True,
                    overwrite=True,
                    show_pbar=True)
_report(2)
dataset.save_to_path(save_path='prune_test.json', overwrite=True)
dataset.display_preview(kpt_idx_offset=-1)
Esempio n. 6
0
from common_utils.common_types.keypoint import Keypoint2D_List, Keypoint2D
# import printj
from imageaug import AugHandler, Augmenter as aug
from random import choice
from tqdm import tqdm
# import imgaug.augmenters as iaa

# PATH='/home/jitesh/3d/data/coco_data/mp_200_23_04_2020_15_37_00_coco-data'
# path = '/home/jitesh/3d/data/coco_data/sample_measure_coco_data'
# path = '/home/jitesh/3d/data/coco_data/measure_combined7'
# dest_folder_img_combined = f'{path}/img'
# dest_json_file_combined = f'{path}/json/measure-only.json'
# Root of the NDDS->COCO converted bolt-mark dataset; the images and the
# COCO json live side by side in this folder.
path = '/home/pasonatech/labelme/ndds2coco/6_22/bolt_mark/type4'
dest_folder_img_combined = f'{path}'
dest_json_file_combined = f'{path}/HSR-coco.json'
dataset = COCO_Dataset.load_from_path(json_path=dest_json_file_combined,
                                      img_dir=dest_folder_img_combined)
# Augmented visualizations are dumped under this directory.
output = f'{path}/aug_vis'
make_dir_if_not_exists(output)
# Alias so imgaug-style code below can refer to the augmenter as `iaa`.
iaa = aug
# resize_save_path = 'test_resize.json'
handler_save_path = 'test_handler.json'
# if not file_exists(resize_save_path):
#     resize = aug.Resize(width=500, height=500)
#     resize.save_to_path(save_path=resize_save_path, overwrite=True)
#     logger.info(f'Created new Resize save.')
# else:
#     resize = aug.Resize.load_from_path(resize_save_path)
#     logger.info(f'Loaded Resize from save.')
# if not file_exists(handler_save_path):
#     handler = AugHandler(
#         [
Esempio n. 7
0
            obj_type, obj_name = 'seg', 'hsr'
            instance_name = '0'
            ann_obj.class_name = f'{obj_type}_{obj_name}_{instance_name}'
        elif ann_obj.class_name.lower() in list('abcdefghijkl'):
            obj_type, obj_name = 'kpt', 'hsr'
            instance_name, contained_name = '0', ann_obj.class_name
            ann_obj.class_name = f'{obj_type}_{obj_name}_{instance_name}_{contained_name}'
        else:
            logger.error(f'Unknown ann_obj.class_name: {ann_obj.class_name}')
            raise Exception

# Convert To COCO Dataset
# Convert the (renamed) NDDS annotations into a COCO dataset, relocate the
# frames into dst_dir, then emit the json and a preview video with masks.
dataset = COCO_Dataset.from_ndds(
    ndds_dataset=ndds_dataset,
    categories=hsr_categories,
    naming_rule='type_object_instance_contained',
    show_pbar=True,
    bbox_area_threshold=1,
    allow_same_instance_for_contained=True,
    color_interval=5,
)
dataset.move_images(
    dst_dir,
    preserve_filenames=True,
    update_img_paths=True,
    overwrite=True,
    show_pbar=True,
)
dataset.save_to_path(f'{dst_dir}/output.json', overwrite=True)
dataset.save_video(save_path=f'{dst_dir}/preview_with_mask.avi',
                   fps=5,
                   show_details=True,
                   kpt_idx_offset=-1)
Esempio n. 8
0
# Fix NDDS Dataset naming so that it follows convention. (This is not necessary if the NDDS dataset already follows the naming convention.)
# Rename NDDS objects to the '<type>_<object>_<instance>' convention the
# converter expects. (A no-op when the dataset already complies.)
for frame in ndds_dataset.frames:
    for ann_obj in frame.ndds_ann.objects:
        if ann_obj.class_name.startswith('crescent'):
            ann_obj.class_name = '_'.join(('seg', 'crescent', '0'))

# Single 'crescent' category, id 0.
crescent_categories = COCO_Category_Handler()
crescent_categories.append(
    COCO_Category(id=len(crescent_categories), name='crescent'))

# Convert, relocate the images, then dump the json and a preview video.
dataset = COCO_Dataset.from_ndds(
    ndds_dataset=ndds_dataset,
    categories=crescent_categories,
    naming_rule='type_object_instance_contained',
    show_pbar=True,
    bbox_area_threshold=-1,
    allow_unfound_seg=True,
)
dataset.move_images(
    dst_img_dir=dst_dir,
    preserve_filenames=True,
    overwrite_duplicates=False,
    update_img_paths=True,
    overwrite=True,
    show_pbar=True,
)
dataset.save_to_path(f'{dst_dir}/output.json', overwrite=True)
dataset.save_video(save_path=f'{dst_dir}/preview.mp4',
                   fps=5,
                   show_details=True)
import cv2,os
from common_utils.common_types.keypoint import Keypoint2D_List, Keypoint2D
from common_utils.common_types.segmentation import Segmentation, Polygon
import shapely
from shapely import ops
import numpy as np
from common_utils.cv_drawing_utils import draw_keypoints, cv_simple_image_viewer, draw_bbox, draw_segmentation

from imageaug import AugHandler, Augmenter as aug

# Directory where visualized augmented images will be written.
save_img_path = "/home/pasonatech/detectron/detectron2/gbox/vis_image/"

dataset = COCO_Dataset.load_from_path(

    json_path='/home/pasonatech/combined_cocooutput/HSR-coco.json',
    img_dir='/home/pasonatech/combined_cocooutput'

    #json_path='/home/pasonatech/aug_real_combine/aug_sim_com_garbage/HSR-coco.json',
    #img_dir='/home/pasonatech/aug_real_combine/aug_sim_com_garbage'
)

# Save-paths for serialized augmentation configs.
resize_save_path = 'test_resize.json'
handler_save_path = 'test_handler.json'
# Build a fresh augmentation pipeline only when no saved config exists.
# NOTE(review): no `else` branch is visible here, so `handler` is unbound
# when the file already exists — presumably loaded further down; confirm
# against the rest of this script.
if not file_exists(handler_save_path):
    handler = AugHandler(
        [
            aug.Crop(percent=[0.2, 0.5]),
            aug.Flipud(p=0.5),
            aug.Superpixels()
            # aug.Sharpen(alpha=[-1,0.1], lightness=[0,3])
        ]
    )
Esempio n. 10
0
from common_utils.file_utils import make_dir_if_not_exists, delete_all_files_in_dir
from annotation_utils.coco.structs import COCO_Dataset

# Dump each COCO handler (info/images/annotations/categories) into its own
# json file under a freshly emptied ann_dump directory.
dataset = COCO_Dataset.load_from_path(json_path='output.json')

dump_dir = 'ann_dump'
make_dir_if_not_exists(dump_dir)
delete_all_files_in_dir(dump_dir, ask_permission=False)

for handler_name in ('info', 'images', 'annotations', 'categories'):
    getattr(dataset, handler_name).save_to_path(
        f'{dump_dir}/{handler_name}.json')
# Extra copy of the categories for reuse elsewhere.
dataset.categories.save_to_path('box_hsr_categories.json')
Esempio n. 11
0
def infer(
    path: str,
    weights_path: str,
    thresh: float = 0.5,
    key: str = 'R',
    infer_dump_dir: str = '',
    model: str = 'mask_rcnn_R_50_FPN_1x',
    size: int = 1024,
    class_names: List[str] = ['hook'],
    gt_path:
    str = '/home/jitesh/3d/data/coco_data/hook_test/json/cropped_hook.json'):
    """Run keypoint+segmentation inference on every image under ``path``.

    For each image the best-scoring 'pole' prediction yields a diameter and
    the best-scoring 'hook' prediction yields keypoints/mask; both are drawn
    onto the image along with an info box, and the visualization is written
    to ``{infer_dump_dir}/infer_key_seg/``.

    NOTE(review): ``thresh``, ``key`` and ``model`` are accepted but never
    used below; ``weights_path`` is immediately clobbered by hard-coded
    paths; ``class_names`` has a mutable default shared across calls.
    All left as-is pending confirmation of intent.
    """
    # class_names=['hook', 'pole']
    # class_names=['hook']
    conf_thresh = 0.001
    show_bbox_border = True
    # Ground-truth dataset (loaded but not referenced further in this view).
    gt_dataset = COCO_Dataset.load_from_path(json_path=gt_path)
    inferer_seg = inferer(
        weights_path=weights_path,
        confidence_threshold=0.1,
        # num_classes=1,
        # num_classes=2,
        class_names=class_names,
        # class_names=['hook'],
        model='keypoint_rcnn_R_50_FPN_1x',
        # model='faster_rcnn_X_101_32x8d_FPN_3x',
        # model='faster_rcnn_R_101_FPN_3x',
        # model=model,
    )
    inferer_seg.cfg.INPUT.MIN_SIZE_TEST = size
    inferer_seg.cfg.INPUT.MAX_SIZE_TEST = size
    inferer_seg.cfg.MODEL.MASK_ON = True

    # NOTE(review): the weights_path argument is discarded here; only the
    # last assignment below takes effect.
    weights_path = '/home/jitesh/3d/data/coco_data/hook_sim_real_data7/weights/Keypoints_R_50_1x_aug_cm_seg_val_1/model_0009999.pth'
    weights_path = '/home/jitesh/3d/data/coco_data/hook_sim_real_data7_0.1/weights/Keypoints_R_50_1x_aug_cm_seg_val_3/model_0009999.pth'
    weights_path = '/home/jitesh/3d/data/coco_data/hook_sim_real_data7_0.1/weights/Keypoints_R_50_1x_aug_cm_seg_val_1/model_0007999.pth'
    weights_path = '/home/jitesh/3d/data/coco_data/hook_sim_real_data8/weights/Keypoints_R_50_1x_aug_key_seg_val_1/model_0009999.pth'
    weights_path = '/home/jitesh/3d/data/coco_data/hook_sim_real_data8/weights/Keypoints_R_50_1x_aug_key_seg_val_2/model_0004999.pth'
    # inferer_key = jDetectron2KeypointInferer(
    #     weights_path=weights_path,
    #     # ref_coco_ann_path=f'/home/jitesh/3d/data/coco_data/hook_real1/json/hook.json',
    #     # categories_path=f'/home/jitesh/3d/data/categories/hook_infer.json',
    #     # categories_path=f'/home/jitesh/3d/data/categories/hook_7ckpt.json',
    #     categories_path=f'/home/jitesh/3d/data/categories/hook_7ckpt_pole.json',
    #     target_category='hook',
    #     model_name='keypoint_rcnn_R_50_FPN_1x',
    #     bbox_threshold=bbox_thresh,
    #     kpt_threshold=kpt_thresh,
    #     key_box='hook',
    # )
    # k_size = 1024
    # inferer_key.cfg.INPUT.MIN_SIZE_TEST = k_size
    # inferer_key.cfg.INPUT.MAX_SIZE_TEST = k_size

    possible_modes = ['save', 'preview']
    mode = 'save'
    check_value(mode, valid_value_list=possible_modes)
    # make_dir_if_not_exists(infer_dump_dir)
    img_extensions = ['jpg', 'JPG', 'png', 'PNG']
    img_pathlist = get_all_files_in_extension_list(
        dir_path=f'{path}', extension_list=img_extensions)
    img_pathlist.sort()

    confirm_folder(infer_dump_dir, mode)
    # confirm_folder(f'{infer_dump_dir}/good_seg', mode)
    # confirm_folder(f'{infer_dump_dir}/good_cropped', mode)
    # confirm_folder(f'{infer_dump_dir}/good', mode)
    # confirm_folder(f'{infer_dump_dir}/G(>4D) P(>4D)', mode)
    # confirm_folder(f'{infer_dump_dir}/G(>4D) P(<4D)', mode)
    # confirm_folder(f'{infer_dump_dir}/G(<4D) P(>4D)', mode)
    # confirm_folder(f'{infer_dump_dir}/G(<4D) P(<4D)', mode)
    # confirm_folder(f'{infer_dump_dir}/bad', mode)
    confirm_folder(f'{infer_dump_dir}/infer_key_seg', mode)

    # Accumulator dataframe for per-image metrics (not populated in this
    # view — presumably filled further down or in a later revision).
    count = 0
    start = datetime.now()
    df = pd.DataFrame(data=[],
                      columns=[
                          'gt_d',
                          'pred_d',
                          'gt_ab',
                          'pred_ab',
                          'gt_ratio',
                          'pred_ratio',
                          'gt_ratio>4',
                          'pred_ratio>4',
                          'correct_above4d_ratio',
                          'incorrect_above4d_ratio',
                          'correct_below4d_ratio',
                          'incorrect_below4d_ratio',
                      ])
    #  'image_path'])
    for i, img_path in enumerate(tqdm(
            img_pathlist,
            desc='Writing images',
    )):
        img_filename = get_filename(img_path)
        # if not '201005_70_縮小革命PB020261.jpg' in img_path:
        #     continue
        # if i > 19:
        #     continue
        printj.purple(img_path)
        img = cv2.imread(img_path)
        result = img
        # print(f'shape {img.shape}')
        # cv2.imshow('i', img)
        # cv2.waitKey(100000)
        # continue
        score_list, pred_class_list, bbox_list, pred_masks_list, pred_keypoints_list, vis_keypoints_list, kpt_confidences_list = inferer_seg.predict(
            img=img)
        # printj.blue(pred_masks_list)
        # Track only the single best-scoring pole and hook per image.
        max_hook_score = -1
        max_pole_score = -1
        diameter = -1
        len_ab = -1
        found_hook = False
        found_pole = False
        for score, pred_class, bbox, mask, keypoints, vis_keypoints, kpt_confidences in zip(
                score_list, pred_class_list, bbox_list, pred_masks_list,
                pred_keypoints_list, vis_keypoints_list, kpt_confidences_list):

            if pred_class == 'pole':
                found_pole = True
                if max_pole_score < score:
                    # if True:
                    max_pole_score = score
                    diameter = compute_diameter(mask)
                    # result = draw_bool_mask(img=result, mask=mask, color=[
                    #                     0, 255, 255],
                    #                     transparent=True
                    #                     )
                    pole_bbox_text = f'pole {str(round(score, 2))}'
                    pole_bbox = bbox
                    pole_mask = mask
                    # result = draw_bbox(img=result, bbox=bbox,
                    #                    text=pole_bbox_text, label_only=not show_bbox_border, label_orientation='bottom')
                    printj.blue(f'diameter={diameter}')
            if pred_class == 'hook':
                # printj.green.bold_on_yellow(score)
                found_hook = True
                if max_hook_score < score:
                    # if True:
                    max_hook_score = score
                    hook_bbox = BBox.buffer(bbox)
                    hook_score = round(score, 2)
                    hook_mask = mask
                    hook_keypoints = keypoints
                    hook_vis_keypoints = vis_keypoints
                    hook_kpt_confidences = kpt_confidences
                    # xmin, ymin, xmax, ymax = bbox.to_int().to_list()
                    # _xmin, _ymin, _xmax, _ymax = _bbox.to_int().to_list()
                    # width = _xmax-_xmin
                    # height = _ymax-_ymin
                    # scale = 0.2
                    # xmin = max(int(_xmin - width*scale), 0)
                    # xmax = min(int(_xmax + width*scale), img.shape[1])
                    # ymin = max(int(_ymin - height*scale), 0)
                    # ymax = min(int(_ymax + height*scale), img.shape[0])

                    # printj.red(score)
                    # printj.red(bbox)
                    # return
                    # img = draw_bbox(img=img, bbox=_bbox, color=[
                    #                 0, 255, 255], thickness=2, text=f"{pred_class} {round(score, 3)}",
                    #                 label_orientation='top')
                    # img = draw_bbox(img=img, bbox=_bbox, color=[
                    #                 0, 255, 255], thickness=2, text=f"{pred_class} {round(score, 3)}",
                    #                 label_orientation='bottom')
                    # result = draw_bool_mask(img=result, mask=mask, color=[
                    #     255, 255, 0],
                    #     transparent=True
                    # )
                    # result = result
                    # bbox_text = str(round(score, 4))
                    # result = draw_bbox(img=result, bbox=bbox,
                    #                    text=bbox_text, label_only=not show_bbox_border)
                    bbox_label_mode = 'euler'
                    # result = draw_keypoints(
                    #     img=result, keypoints=vis_keypoints, radius=2, color=[0, 0, 255],
                    #     # keypoint_labels=kpt_labels, show_keypoints_labels=True, label_thickness=1,
                    #     # ignore_kpt_idx=conf_idx_list
                    #     )
                    kpt_labels = [
                        "kpt-a", "kpt-b", "kpt-cb", "kpt-c", "kpt-cd", "kpt-d",
                        "kpt-e"
                    ]
                    kpt_skeleton = [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5],
                                    [5, 6]]
                    # Split keypoints into confident / non-confident sets by
                    # comparing each confidence against conf_thresh.
                    conf_idx_list = np.argwhere(
                        np.array(kpt_confidences) > conf_thresh).reshape(-1)
                    not_conf_idx_list = np.argwhere(
                        np.array(kpt_confidences) <= conf_thresh).reshape(
                            -1).astype(int)
                    conf_keypoints, conf_kpt_labels = np.array(vis_keypoints)[
                        conf_idx_list], np.array(kpt_labels)[conf_idx_list]
                    not_conf_keypoints, not_conf_kpt_labels = np.array(
                        vis_keypoints)[not_conf_idx_list], np.array(
                            kpt_labels)[not_conf_idx_list]
                    cleaned_keypoints = np.array(vis_keypoints.copy()).astype(
                        np.float32)
                    # result = draw_bool_mask(img=result, mask=mask, color=[
                    #     255, 255, 0],
                    #     transparent=True
                    # )
                    # result, len_ab = draw_inference_on_hook2(img=result, cleaned_keypoints=cleaned_keypoints, kpt_labels=kpt_labels, kpt_skeleton=kpt_skeleton,
                    #                                         score=score, bbox=_bbox, vis_keypoints=vis_keypoints, kpt_confidences=kpt_confidences, conf_idx_list=conf_idx_list, not_conf_idx_list=not_conf_idx_list,
                    #                                         conf_keypoints=conf_keypoints, conf_kpt_labels=conf_kpt_labels, not_conf_keypoints=not_conf_keypoints, not_conf_kpt_labels=not_conf_kpt_labels,
                    #                                         conf_thresh=conf_thresh, show_bbox_border=show_bbox_border, bbox_label_mode=bbox_label_mode, index_offset=0, diameter=diameter)
                    # result=result
                    # printj.green(_bbox)
                    # printj.green(_bbox.to_int())
                    # printj.green(_bbox.to_int().to_list())
        printj.green.on_white(max_hook_score)
        # Draw the retained best pole prediction (mask + labelled bbox).
        if found_pole:
            result = draw_bool_mask(img=result,
                                    mask=pole_mask,
                                    color=[0, 255, 255],
                                    transparent=True)
            result = draw_bbox(img=result,
                               bbox=pole_bbox,
                               text=pole_bbox_text,
                               label_only=not show_bbox_border,
                               label_orientation='top')
            result = draw_bbox(img=result,
                               bbox=pole_bbox,
                               text=pole_bbox_text,
                               label_only=not show_bbox_border,
                               label_orientation='bottom')
        # Draw the retained best hook prediction and compute len_ab.
        # NOTE(review): cleaned_keypoints/conf_* come from the last hook
        # iteration, not necessarily the max-score one — confirm intended.
        if found_hook:
            result = draw_bool_mask(img=result,
                                    mask=hook_mask,
                                    color=[255, 255, 0],
                                    transparent=True)
            result, len_ab = draw_inference_on_hook2(
                img=result,
                cleaned_keypoints=cleaned_keypoints,
                kpt_labels=kpt_labels,
                kpt_skeleton=kpt_skeleton,
                score=hook_score,
                bbox=hook_bbox,
                vis_keypoints=hook_vis_keypoints,
                kpt_confidences=hook_kpt_confidences,
                conf_idx_list=conf_idx_list,
                not_conf_idx_list=not_conf_idx_list,
                conf_keypoints=conf_keypoints,
                conf_kpt_labels=conf_kpt_labels,
                not_conf_keypoints=not_conf_keypoints,
                not_conf_kpt_labels=not_conf_kpt_labels,
                conf_thresh=conf_thresh,
                show_bbox_border=show_bbox_border,
                bbox_label_mode=bbox_label_mode,
                index_offset=0,
                diameter=diameter)
        printj.purple(len_ab)
        # NOTE(review): `keypoints` here is the leaked value of the inner
        # loop variable (last prediction), not the best hook's keypoints.
        if len_ab == 0:
            printj.green(keypoints)
        result = draw_info_box(result, len_ab, diameter)
        #                 img: np.ndarray, cleaned_keypoints, kpt_labels: List[str], kpt_skeleton: List[list],
        # score: float, bbox: BBox, vis_keypoints: list, kpt_confidences: list, conf_idx_list: list, not_conf_idx_list: list,
        # conf_keypoints, conf_kpt_labels, not_conf_keypoints, not_conf_kpt_labels,
        # conf_thresh: float = 0.3, show_bbox_border: bool = False, bbox_label_mode: str = 'euler', index_offset: int = 0, diameter=1

        # cv2.imshow('i', result)
        # # cv2.imwrite('i', result)
        # cv2.waitKey(10000)
        # quit_flag = cv_simple_image_viewer(img=result, preview_width=1000)
        # if quit_flag:
        #     break

        # cv2.imwrite(f"{infer_dump_dir}/good_seg/{img_filename}", result)
        cv2.imwrite(f"{infer_dump_dir}/infer_key_seg/{img_filename}", result)
Esempio n. 12
0
from annotation_utils.coco.structs import COCO_Dataset

# Quick sanity check: load annotations only (image paths unchecked) and
# print how many entries each handler holds.
json_path = (  # Modify this path
    '/home/clayton/workspace/prj/data_keep/data/toyota/dataset/sim/20200228/28_02_2020_11_18_30_coco-data/HSR-coco.json'
)
dataset = COCO_Dataset.load_from_path(json_path=json_path, check_paths=False)
dataset.print_handler_lengths()
Esempio n. 13
0
from annotation_utils.coco.structs import COCO_Dataset

# dataset = COCO_Dataset.load_from_path(
#     json_path='/home/clayton/workspace/prj/data_keep/data/toyota/dataset/sim/20200228/28_02_2020_11_18_30_coco-data/HSR-coco.json',
#     img_dir='/home/clayton/workspace/prj/data_keep/data/toyota/dataset/sim/20200228/28_02_2020_11_18_30_coco-data'
# )
# Combine the real box/HSR keypoint datasets per the json config, then
# render the merged dataset to a preview video.
dataset = COCO_Dataset.combine_from_config(
    '/home/clayton/workspace/prj/data_keep/data/toyota/dataset/config/json/box_hsr_kpt_real.json'
)
dataset.images.sort(attr_name='file_name')

video_kwargs = dict(save_path='dataset.mp4',
                    show_annotations=True,
                    fps=5,
                    show_preview=True,
                    kpt_idx_offset=-1,
                    overwrite=True,
                    show_bbox=False)
dataset.save_video(**video_kwargs)
from annotation_utils.coco.structs import COCO_Dataset

# Keep only the 'measure' category and eyeball the result.
dataset = COCO_Dataset.load_from_path('measure_total_test.json')
dataset.remove_all_categories_except(['measure'])
dataset.display_preview(show_details=True)
Esempio n. 15
0
from annotation_utils.coco.structs import COCO_Dataset
from logger import logger
from common_utils.image_utils import concat_n_images
from common_utils.cv_drawing_utils import cv_simple_image_viewer
from common_utils.file_utils import file_exists
import cv2
from common_utils.common_types.keypoint import Keypoint2D_List, Keypoint2D

from imageaug import AugHandler, Augmenter as aug

# Load the Unreal-generated HSR COCO dataset for augmentation experiments.
dataset = COCO_Dataset.load_from_path(
    json_path=
    '/Users/darwinharianto/Desktop/hayashida/Unreal/18_03_2020_18_03_10_coco-data/HSR-coco.json',
    img_dir=
    '/Users/darwinharianto/Desktop/hayashida/Unreal/18_03_2020_18_03_10_coco-data'
)

# Serialized augmentation configs; reused across runs when present.
resize_save_path = 'test_resize.json'
handler_save_path = 'test_handler.json'
# Create the Resize augmenter on first run, otherwise restore it from disk.
if not file_exists(resize_save_path):
    resize = aug.Resize(width=500, height=500)
    resize.save_to_path(save_path=resize_save_path, overwrite=True)
    logger.info(f'Created new Resize save.')
else:
    resize = aug.Resize.load_from_path(resize_save_path)
    logger.info(f'Loaded Resize from save.')
if not file_exists(handler_save_path):
    handler = AugHandler([
        aug.Crop(percent=[0.2, 0.5]),
        aug.Flipud(p=0.5),
        aug.Superpixels()
Esempio n. 16
0
from annotation_utils.labelme.structs import LabelmeAnnotationHandler
from annotation_utils.coco.structs import COCO_Dataset, COCO_Category_Handler

# Path Variables
# Template: Labelme -> COCO conversion pipeline.
# Fill in the three paths below before running.
img_dir = ''
json_dir = ''
categories_conf_path = ''

# Labelme annotations -> handler -> COCO dataset.
labelme_handler = LabelmeAnnotationHandler.load_from_dir(load_dir=json_dir)
coco_dataset = COCO_Dataset.from_labelme(
    labelme_handler=labelme_handler,
    categories=COCO_Category_Handler.load_from_path(categories_conf_path),
    img_dir=img_dir)

# Persist the converted dataset, then preview it.
coco_dataset.save_to_path(save_path='', overwrite=True)
coco_dataset.display_preview(show_details=True)
coco_dataset.save_visualization(show_details=True)
# coco_dataset.save_video(show_details=True)
Esempio n. 17
0
            logger.error(f'ann_obj.class_name: {ann_obj.class_name}')
            # raise Exception

    # Delete Duplicate Objects
    frame.ndds_ann.objects.delete_duplicates(verbose=True,
                                             verbose_ref=frame.img_path)

# ndds_dataset.save_to_path(save_path=f'{coco_data_dir}/hook_fixed_ndds.json', overwrite=True)

# Convert To COCO Dataset
# Convert NDDS -> COCO using the tropicana category config, then set up the
# output directory tree and relocate the images into it.
dataset = COCO_Dataset.from_ndds(
    ndds_dataset=ndds_dataset,
    # categories=COCO_Category_Handler.load_from_path(f'/home/jitesh/3d/data/categories/hook_7ckpt.json'),
    categories=COCO_Category_Handler.load_from_path(
        f'/home/jitesh/3d/data/categories/tropicana.json'),
    naming_rule='type_object_instance_contained',
    ignore_unspecified_categories=True,
    show_pbar=True,
    bbox_area_threshold=1,
    default_visibility_threshold=-1,
    allow_unfound_seg=True,
)
make_dir_if_not_exists(coco_data_dir)
img_path = f'{coco_data_dir}/img'
# BUG FIX: the original re-created coco_data_dir here (a duplicate of the
# call above) instead of creating the img directory that move_images()
# writes into, mirroring the ann_dir handling below.
make_dir_if_not_exists(img_path)
ann_dir = f'{coco_data_dir}/json'
make_dir_if_not_exists(ann_dir)
dataset.move_images(dst_img_dir=img_path,
                    preserve_filenames=False,
                    update_img_paths=True,
                    overwrite=True,
                    show_pbar=True)
from annotation_utils.coco.structs import COCO_Dataset, \
    COCO_License, COCO_Image, COCO_Annotation, COCO_Category
from common_utils.common_types.bbox import BBox
from logger import logger

# Build a synthetic dataset skeleton: three categories under one
# supercategory, followed by ten licenses, each id'd by insertion order.
dataset = COCO_Dataset.new(description='Test')
for cat_name in ('category_a', 'category_b', 'category_c'):
    dataset.categories.append(
        COCO_Category(id=len(dataset.categories),
                      supercategory='test_category',
                      name=cat_name))

for i in range(10):
    dataset.licenses.append(
        COCO_License(url=f'test_license_{i}',
                     name=f'Test License {i}',
                     id=len(dataset.licenses)))
for i in range(20):
    dataset.images.append(
        COCO_Image(license_id=i % len(dataset.licenses),
                   file_name=f'{i}.jpg',
                   coco_url=f'/path/to/{i}.jpg',
                   height=500,
                   width=500,
Esempio n. 19
0
# Register the single 'bolt-roi' category; all mark segmentations below are
# merged into it via class_merge_map.
bolt_roi_categories.append(
    COCO_Category(id=len(bolt_roi_categories), name='bolt-roi'))
print(f"Bolt_roi_categories :{bolt_roi_categories}")

merge_map = {
    'seg_mark-inner_0': 'seg_bolt-roi_0',
    'seg_mark-middle_0': 'seg_bolt-roi_0',
    'seg_mark-outer_0': 'seg_bolt-roi_0',
}
bolt_roi_dataset = COCO_Dataset.from_ndds(
    ndds_dataset=ndds_dataset,
    categories=bolt_roi_categories,
    naming_rule='type_object_instance_contained',
    delimiter='_',
    ignore_unspecified_categories=True,
    show_pbar=True,
    bbox_area_threshold=1,
    default_visibility_threshold=0.01,
    visibility_threshold_dict={'bolt-roi': 0.01},
    allow_unfound_seg=False,
    class_merge_map=merge_map)

bolt_roi_dst_dir = f'{target_dst_dir}/bolt_roi'
make_dir_if_not_exists(bolt_roi_dst_dir)
bolt_roi_dataset.move_images(dst_img_dir=bolt_roi_dst_dir,
                             preserve_filenames=True,
                             overwrite_duplicates=False,
                             update_img_paths=True,
                             overwrite=True,
                             show_pbar=True)
bolt_roi_dataset.save_to_path(f'{bolt_roi_dst_dir}/output.json',
                              overwrite=True)
Esempio n. 20
0
from annotation_utils.labelme.structs import LabelmeAnnotationHandler
from annotation_utils.coco.structs import COCO_Dataset, COCO_Category_Handler

# Source directories for the sampled phone-video frames and their labelme json.
img_dir = '/home/clayton/workspace/prj/data_keep/data/toyota/dataset/real/phone_videos/new/sampled_data/VID_20200217_161043/img'
json_dir = '/home/clayton/workspace/prj/data_keep/data/toyota/dataset/real/phone_videos/new/sampled_data/VID_20200217_161043/json'

# Load every labelme annotation found in the json directory.
labelme_handler = LabelmeAnnotationHandler.load_from_dir(load_dir=json_dir)

# Category definitions come from a pre-built config file.
hsr_categories = COCO_Category_Handler.load_from_path(
    '/home/clayton/workspace/prj/data_keep/data/toyota/dataset/config/categories/hsr_categories.json'
)

# Convert labelme -> COCO and write the result next to this script.
coco_dataset = COCO_Dataset.from_labelme(
    labelme_handler=labelme_handler,
    categories=hsr_categories,
    img_dir=img_dir,
)
coco_dataset.save_to_path(save_path='output.json', overwrite=True)

# Round-trip check: reload the saved json and preview it.
test_coco_dataset = COCO_Dataset.load_from_path(json_path='output.json')
test_coco_dataset.display_preview(kpt_idx_offset=-1)
Esempio n. 21
0
# Build a COCO_Info instance from a plain dict and inspect the round-trip.
info_fields = {
    'description': 'This is a test',
    'url': 'https://test/url.com',
    'version': '1.0',
    'year': '2020',
    'contributor': 'Clayton',
    'date_created': '2020/03/10'
}
info2 = COCO_Info.from_dict(info_fields)
logger.purple(f'info2:\n{info2}')

info2.save_to_path('info.json', overwrite=True)

from annotation_utils.coco.structs import COCO_Dataset

# Combine several datasets as described by the yaml config, sorting images
# by filename.
dataset = COCO_Dataset.combine_from_config(
    config_path=
    '/home/clayton/workspace/prj/data_keep/data/toyota/dataset/config/yaml/box_hsr_kpt_sim.yaml',
    img_sort_attr_name='file_name',
    show_pbar=True)

# Spot-check a single annotation and clear its 3D keypoints before dumping.
coco_ann = dataset.annotations[123]
logger.purple(coco_ann)
from common_utils.common_types.keypoint import Keypoint3D_List
coco_ann.keypoints_3d = Keypoint3D_List()
logger.purple(f'coco_ann.to_dict():\n{coco_ann.to_dict()}')
Esempio n. 22
0
from annotation_utils.linemod.objects import Linemod_Dataset, LinemodCamera
from annotation_utils.coco.structs import COCO_Dataset

# Load the trained PVNet weights (earlier checkpoints kept as comments).
inferer = PVNetInferer(
    # weight_path='/home/clayton/workspace/git/clean-pvnet/data/model/pvnet/custom/99.pth',
    # weight_path='/home/clayton/workspace/prj/data_keep/data/misc_dataset/darwin_weights/194.pth',
    weight_path=
    '/home/clayton/workspace/prj/data_keep/data/weights/pvnet_hsr/20201119/499.pth'
)
# img_dir = '/home/clayton/workspace/git/pvnet-rendering/test/renders1'
img_dir = '/home/clayton/workspace/prj/data_keep/data/misc_dataset/darwin_datasets/nihonbashi2/organized'
linemod_dataset = Linemod_Dataset.load_from_path(f'{img_dir}/train.json')

# The COCO dataset that inference will run on.
coco_dataset = COCO_Dataset.load_from_path(
    json_path=
    '/home/clayton/workspace/prj/data_keep/data/toyota/from_toyota/20201017/20201017_robot_camera/combined/output.json',
    img_dir=
    '/home/clayton/workspace/prj/data_keep/data/toyota/from_toyota/20201017/20201017_robot_camera/combined'
)
# Pull 3D hyperparameters (fps keypoints + center, box corners, intrinsics)
# and the image size from the first Linemod sample — presumably constant
# across the dataset; TODO(review): confirm.
linemod_ann_sample = linemod_dataset.annotations[0]
kpt_3d = linemod_ann_sample.fps_3d.copy()
kpt_3d.append(linemod_ann_sample.center_3d)
corner_3d = linemod_ann_sample.corner_3d
K = linemod_ann_sample.K
linemod_image_sample = linemod_dataset.images[0]
dsize = (linemod_image_sample.width, linemod_image_sample.height)

# NOTE(review): this call is truncated in this chunk — the remaining keyword
# arguments and closing parenthesis lie outside the visible source.
inferer.infer_coco_dataset(dataset=coco_dataset,
                           kpt_3d=kpt_3d,
                           corner_3d=corner_3d,
                           K=K,
                           blackout=True,
Esempio n. 23
0
    'pvnet-darwin20210105-epoch599',
    'pvnet-darwin20210105-epoch699',
    'pvnet-darwin20210105-epoch799',
]
# Gather every per-test COCO dataset found under the robot-camera test root.
# Each csv marks one test; its dataset is expected at <csv dir>/images/output.json.
test_root_dir = '/home/clayton/workspace/prj/data_keep/data/toyota/from_toyota/20201017/20201017_robot_camera'
csv_paths = recursively_get_all_filepaths_of_extension(test_root_dir, extension='csv')

test_names, datasets = [], []
for csv_path in csv_paths:
    test_name = get_rootname_from_path(csv_path)
    img_dir = f'{get_dirpath_from_filepath(csv_path)}/images'
    assert dir_exists(img_dir), f"Couldn't find image directory: {img_dir}"
    ann_path = f'{img_dir}/output.json'
    if not file_exists(ann_path):
        # Tests without an annotation file are silently skipped.
        continue
    dataset = COCO_Dataset.load_from_path(ann_path, img_dir=img_dir)
    test_names.append(test_name)
    datasets.append(dataset)

# Ground-truth 3D hyperparameters come from the first Linemod annotation.
linemod_dataset = Linemod_Dataset.load_from_path(
    '/home/clayton/workspace/prj/data/misc_dataset/darwin_datasets/coco2linemod/darwin20210105_blackout/train.json'
)
linemod_ann_sample = linemod_dataset.annotations[0]
kpt_3d = linemod_ann_sample.fps_3d.copy()
kpt_3d.append(linemod_ann_sample.center_3d)
corner_3d = linemod_ann_sample.corner_3d
# K = linemod_ann_sample.K
# Hard-coded camera intrinsics matrix (fx, fy, principal point).
K = np.array([
    [517.799858, 0.000000, 303.876287],
    [0.000000, 514.807834, 238.157119],
    [0.000000, 0.000000, 1.000000],
])
Esempio n. 24
0
from annotation_utils.coco.structs import COCO_Dataset

# Split the bird dataset into three shuffled parts (ratio 1:2:3) and render
# each part to its own preview video.
dataset = COCO_Dataset.load_from_path(
    '/home/clayton/workspace/prj/data_keep/data/dataset/bird/img/output.json',
    img_dir='/home/clayton/workspace/prj/data_keep/data/dataset/bird/img')

# split_image_handlers = dataset.images.split(ratio=[1, 3, 1], shuffle=True)
# for image_handler in split_image_handlers:
#     print(f'len(image_handler): {len(image_handler)}')
#     print(f'\tfilenames: {[coco_image.file_name for coco_image in image_handler]}')

parts = dataset.split_into_parts(ratio=[1, 2, 3], shuffle=True)
# enumerate replaces the manual `part_count = -1; part_count += 1` counter.
for part_count, part in enumerate(parts):
    print(f'len(part.images): {len(part.images)}')
    part.save_video(save_path=f'part{part_count}.avi',
                    fps=5,
                    show_details=True)
Esempio n. 25
0
from annotation_utils.coco.structs import COCO_Dataset

# Load the simulated HSR COCO dataset, sort its images by filename, and dump
# an annotated visualization for inspection.
sim_root = '/home/clayton/workspace/prj/data_keep/data/toyota/dataset/sim/20200228/28_02_2020_11_18_30_coco-data'
dataset = COCO_Dataset.load_from_path(json_path=f'{sim_root}/HSR-coco.json',
                                      img_dir=sim_root)
dataset.images.sort(attr_name='file_name')
dataset.save_visualization(save_dir='test_vis',
                           show_preview=True,
                           kpt_idx_offset=-1,
                           overwrite=True)
Esempio n. 26
0
# Load PVNet weights trained on the HSR object.
inferer = PVNetInferer(
    weight_path=
    '/home/clayton/workspace/prj/data_keep/data/weights/pvnet_hsr/20201119/499.pth'
)

# 3D hyperparameters and resolution are sampled once from index 0; they are
# assumed constant across the Linemod dataset.
linemod_dataset = Linemod_Dataset.load_from_path(
    '/home/clayton/workspace/prj/data_keep/data/misc_dataset/clayton_datasets/combined/output.json'
)
kpt_3d, corner_3d, K = linemod_dataset.sample_3d_hyperparams(idx=0)
dsize = linemod_dataset.sample_dsize(idx=0)

# Define the test dataset that inference will run on.
img_dir = '/home/clayton/workspace/prj/data_keep/data/misc_dataset/darwin_datasets/darwin20201209/coco_data'
ann_path = f'{img_dir}/coco_annotations.json'
test_dataset = COCO_Dataset.load_from_path(ann_path,
                                           img_dir=img_dir,
                                           strict=False)

# Keep a random 100-image subset for the inference run.
test_dataset, _ = test_dataset.split_into_parts(
    ratio=[100, len(test_dataset.images) - 100], shuffle=True)

# StreamWriter both shows a preview window and records the output video.
stream_writer = StreamWriter(show_preview=True,
                             video_save_path='example_pnp_inference.avi')
draw_settings = PnpDrawSettings()
draw_settings.direction_line_color = (255, 255, 0)

# Accumulates one frame result per inferred image.
pred_list = PVNetFrameResultList()

# Progress bar for the inference loop that follows.
pbar = tqdm(total=len(test_dataset.images), unit='image(s)', leave=True)
    categories.append( # Simple Keypoint Example
        COCO_Category.from_label_skeleton(
            id=len(categories),
            supercategory='pet',
            name='cat',
            label_skeleton=[
                ['left_eye', 'right_eye'],
                ['mouth_left', 'mouth_center'], ['mouth_center', 'mouth_right']
            ]
        )
    )
    for name in ['duck', 'sparrow', 'pigion']:
        categories.append( # Simple Non-Keypoint Example
            COCO_Category(
                id=len(categories),
                supercategory='bird',
                name=name
            )
        )
    categories.save_to_path('categories_example.json')
else: # Or load from an existing categories json
    categories = COCO_Category_Handler.load_from_path('categories_example.json')

# Convert the labelme annotations to a COCO dataset using the categories
# prepared above, then persist the result.
coco_dataset = COCO_Dataset.from_labelme(labelme_handler=labelme_handler,
                                         categories=categories,
                                         img_dir=img_dir)
coco_dataset.save_to_path(save_path='converted_coco.json', overwrite=True)
# Optional: preview your resulting dataset.
coco_dataset.display_preview(show_details=True)
Esempio n. 28
0
    # Rewrite NDDS object class names into the 'type_object_instance_contained'
    # naming convention expected by the downstream COCO conversion.
    for ann_obj in frame.ndds_ann.objects:
        if ann_obj.class_name.startswith('hsr'):
            # e.g. 'hsr0' -> 'seg_hsr_0' (segmentation instance of hsr).
            obj_type, obj_name = 'seg', 'hsr'
            instance_name = ann_obj.class_name.replace('hsr', '')
            ann_obj.class_name = f'{obj_type}_{obj_name}_{instance_name}'
        elif ann_obj.class_name.startswith('point'):
            # After stripping 'point', the second char is the instance and the
            # first is the contained keypoint label: e.g. 'pointAB' -> 'kpt_hsr_B_A'.
            obj_type, obj_name = 'kpt', 'hsr'
            temp = ann_obj.class_name.replace('point', '')
            instance_name, contained_name = temp[1], temp[0]
            ann_obj.class_name = f'{obj_type}_{obj_name}_{instance_name}_{contained_name}'
        else:
            # Any class name outside the two expected families is fatal.
            logger.error(f'ann_obj.class_name: {ann_obj.class_name}')
            raise Exception

    # Delete Duplicate Objects
    frame.ndds_ann.objects.delete_duplicates(verbose=True,
                                             verbose_ref=frame.img_path)

# Persist the fixed NDDS annotations before conversion.
ndds_dataset.save_to_path(save_path='hsr_fixed_ndds.json', overwrite=True)

# Convert the NDDS dataset to COCO format using the hsr category config.
hsr_categories = COCO_Category_Handler.load_from_path(
    '/home/clayton/workspace/prj/data_keep/data/ndds/categories/hsr.json')
dataset = COCO_Dataset.from_ndds(ndds_dataset=ndds_dataset,
                                 categories=hsr_categories,
                                 naming_rule='type_object_instance_contained',
                                 show_pbar=True,
                                 bbox_area_threshold=1)

# Save and preview the converted dataset.
dataset.save_to_path('hsr_ndds2coco_test.json', overwrite=True)
dataset.display_preview(show_details=True)
Esempio n. 29
0
from annotation_utils.coco.structs import COCO_Dataset

# Render the merged measure dataset to a detailed visualization video.
dataset = COCO_Dataset.load_from_path('measure_coco/measure/output.json')
dataset.save_video(save_path='merged_mask_measure_viz.mp4',
                   show_details=True,
                   fps=3)
Esempio n. 30
0
from annotation_utils.coco.structs import COCO_Dataset

# Split a combined screw+mark dataset into two single-category datasets.
data_root = '/home/clayton/Downloads/screw1_mark-100-_coco-data'
dataset = COCO_Dataset.load_from_path(json_path=f'{data_root}/screw-coco.json',
                                      img_dir=data_root,
                                      strict=False)

# Each copy keeps only one of the two categories.
screw_dataset = dataset.copy()
screw_dataset.remove_categories_by_name(category_names=['mark'])
mark_dataset = dataset.copy()
mark_dataset.remove_categories_by_name(category_names=['screw'])

screw_dataset.save_to_path(save_path='screw.json', overwrite=True)
mark_dataset.save_to_path(save_path='mark.json', overwrite=True)

screw_dataset.display_preview(show_details=True, show_seg=False)
mark_dataset.display_preview(show_details=True)