Beispiel #1
0
def run(
    path: str,
    key: str = 'measure',
    show_image: bool = False,
    show_video: bool = True,
):
    """Strip keypoint/segmentation extras from a COCO dataset and visualise it.

    Loads '{path}/{key}-coco.json' non-strictly, blanks out every
    annotation's segmentation/keypoint fields and every category's keypoint
    metadata, round-trips the result through 'non_strict_dataset.json', and
    finally renders it as an image dump and/or a video.
    """
    dataset = COCO_Dataset.load_from_path(json_path=f'{path}/{key}-coco.json',
                                          img_dir=path,
                                          strict=False)
    # Wipe all keypoint and segmentation data from every annotation.
    for ann in dataset.annotations:
        ann.segmentation = Segmentation()
        ann.keypoints = Keypoint2D_List()
        ann.num_keypoints = 0
        ann.keypoints_3d = None
        ann.camera = None
    # ...and the matching keypoint metadata on every category.
    for cat in dataset.categories:
        cat.keypoints = []
        cat.skeleton = []
    dataset.save_to_path('non_strict_dataset.json',
                         strict=False,
                         overwrite=True)
    reloaded = COCO_Dataset.load_from_path('non_strict_dataset.json',
                                           strict=False)
    reloaded.images.sort(attr_name='file_name')

    if show_image:
        # dt_string3 is presumably a module-level timestamp string — TODO confirm.
        reloaded.save_visualization(save_dir=f'{path}_{dt_string3}_visualize',
                                    show_preview=True,
                                    kpt_idx_offset=-1,
                                    overwrite=True,
                                    show_details=True)
    if show_video:
        reloaded.save_video(save_path=f'{path}_{dt_string3}.mp4',
                            show_details=True,
                            show_preview=True,
                            kpt_idx_offset=-1,
                            overwrite=True,
                            fps=5,
                            show_seg=True)
    logger.green('Visualisation complete')
def coco_json_fixer(coco_img_dir: str = None, fix_status: bool = True) -> str:
    """Re-save a COCO annotation file strictly to normalise/fix it.

    Loads '{coco_img_dir}/coco_annotations.json' non-strictly, writes it back
    strictly as 'fixed_coco_annotations.json', runs seg_filter over the fixed
    file, and returns the fixed file's path.

    Args:
        coco_img_dir: Directory containing 'coco_annotations.json' and images.
        fix_status: When False, the function does nothing.

    Returns:
        Path to the fixed annotation file, or None when fix_status is False
        (the original silently fell off the end of the function in that case;
        the guard clause below makes the None return explicit).
    """
    if not fix_status:
        return None
    # Fix coco: round-trip the annotations through a strict save.
    org_coco_file = os.path.join(coco_img_dir, "coco_annotations.json")
    chk_file_exists(org_coco_file)
    fixed_coco_file = os.path.join(coco_img_dir,
                                   "fixed_coco_annotations.json")
    dataset = COCO_Dataset.load_from_path(org_coco_file,
                                          strict=False,
                                          img_dir=coco_img_dir)
    dataset.save_to_path(fixed_coco_file, strict=True, overwrite=True)
    seg_filter(fixed_coco_file)
    return fixed_coco_file
from common_utils.file_utils import make_dir_if_not_exists, delete_all_files_in_dir
from annotation_utils.coco.structs import COCO_Dataset

# Load the full dataset, then dump each of its four component handlers
# (info / images / annotations / categories) to its own JSON file under
# a freshly emptied dump directory.
dataset = COCO_Dataset.load_from_path(json_path='output.json')
dump_dir = 'ann_dump'
make_dir_if_not_exists(dump_dir)
delete_all_files_in_dir(dump_dir, ask_permission=False)

for name, handler in (('info', dataset.info),
                      ('images', dataset.images),
                      ('annotations', dataset.annotations),
                      ('categories', dataset.categories)):
    handler.save_to_path(f'{dump_dir}/{name}.json')
# The categories additionally get a standalone copy at the top level.
dataset.categories.save_to_path('box_hsr_categories.json')
Beispiel #4
0
# Load PVNet weights for pose inference.
inferer = PVNetInferer(
    weight_path=
    '/home/clayton/workspace/prj/data_keep/data/weights/pvnet_hsr/20201119/499.pth'
)
# The Linemod dataset supplies the 3D hyperparameters shared by all frames.
linemod_dataset = Linemod_Dataset.load_from_path(
    '/home/clayton/workspace/prj/data_keep/data/misc_dataset/clayton_datasets/combined/output.json'
)
kpt_3d, corner_3d, K = linemod_dataset.sample_3d_hyperparams(
    idx=0)  # Should be constant
dsize = linemod_dataset.sample_dsize(idx=0)  # Assuming constant resolution

# Define test dataset that you want to run inference on
img_dir = '/home/clayton/workspace/prj/data_keep/data/misc_dataset/darwin_datasets/darwin20201209/coco_data'
ann_path = f'{img_dir}/coco_annotations.json'
test_dataset = COCO_Dataset.load_from_path(ann_path,
                                           img_dir=img_dir,
                                           strict=False)
# Keep a random 100-image subset; the remainder part is discarded.
test_dataset, _ = test_dataset.split_into_parts(
    ratio=[100, len(test_dataset.images) - 100], shuffle=True)

# Define a StreamWriter for creating a preview window and saving video
stream_writer = StreamWriter(show_preview=True,
                             video_save_path='example_pnp_inference.avi')
draw_settings = PnpDrawSettings()
draw_settings.direction_line_color = (255, 255, 0)

# Initialize Inference Dump Data Handler
pred_list = PVNetFrameResultList()

# Inference Loop
pbar = tqdm(total=len(test_dataset.images), unit='image(s)', leave=True)
Beispiel #5
0
from annotation_utils.coco.structs import COCO_Dataset

# Load a combined screw+mark COCO dataset (non-strict parsing).
dataset = COCO_Dataset.load_from_path(
    json_path=
    '/home/clayton/Downloads/screw1_mark-100-_coco-data/screw-coco.json',
    img_dir='/home/clayton/Downloads/screw1_mark-100-_coco-data',
    strict=False)
# Split into two single-category datasets by removing the other category
# from a copy of the original, then save and preview each.
screw_dataset, mark_dataset = dataset.copy(), dataset.copy()
screw_dataset.remove_categories_by_name(category_names=['mark'])
mark_dataset.remove_categories_by_name(category_names=['screw'])
screw_dataset.save_to_path(save_path='screw.json', overwrite=True)
mark_dataset.save_to_path(save_path='mark.json', overwrite=True)

screw_dataset.display_preview(show_details=True, show_seg=False)
mark_dataset.display_preview(show_details=True, )
from annotation_utils.linemod.objects import Linemod_Dataset, LinemodCamera
from annotation_utils.coco.structs import COCO_Dataset

# Load PVNet weights (earlier candidate checkpoints kept for reference).
inferer = PVNetInferer(
    # weight_path='/home/clayton/workspace/git/clean-pvnet/data/model/pvnet/custom/99.pth',
    # weight_path='/home/clayton/workspace/prj/data_keep/data/misc_dataset/darwin_weights/194.pth',
    weight_path=
    '/home/clayton/workspace/prj/data_keep/data/weights/pvnet_hsr/20201119/499.pth'
)
# img_dir = '/home/clayton/workspace/git/pvnet-rendering/test/renders1'
img_dir = '/home/clayton/workspace/prj/data_keep/data/misc_dataset/darwin_datasets/nihonbashi2/organized'
linemod_dataset = Linemod_Dataset.load_from_path(f'{img_dir}/train.json')

coco_dataset = COCO_Dataset.load_from_path(
    json_path=
    '/home/clayton/workspace/prj/data_keep/data/toyota/from_toyota/20201017/20201017_robot_camera/combined/output.json',
    img_dir=
    '/home/clayton/workspace/prj/data_keep/data/toyota/from_toyota/20201017/20201017_robot_camera/combined'
)
# 3D keypoint set = FPS keypoints plus the object center, taken from the
# first Linemod annotation.
linemod_ann_sample = linemod_dataset.annotations[0]
kpt_3d = linemod_ann_sample.fps_3d.copy()
kpt_3d.append(linemod_ann_sample.center_3d)
corner_3d = linemod_ann_sample.corner_3d
K = linemod_ann_sample.K
# Output size from the first image; assumes constant resolution — TODO confirm.
linemod_image_sample = linemod_dataset.images[0]
dsize = (linemod_image_sample.width, linemod_image_sample.height)
inferer.infer_coco_dataset(dataset=coco_dataset,
                           kpt_3d=kpt_3d,
                           corner_3d=corner_3d,
                           K=K,
                           blackout=True,
from annotation_utils.coco.structs import COCO_Dataset

# Split the bird dataset into three parts (ratio 1:2:3) and render each
# part as its own annotated preview video.
dataset = COCO_Dataset.load_from_path(
    '/home/clayton/workspace/prj/data_keep/data/dataset/bird/img/output.json',
    img_dir='/home/clayton/workspace/prj/data_keep/data/dataset/bird/img')

parts = dataset.split_into_parts(ratio=[1, 2, 3], shuffle=True)
# enumerate() replaces the original's manually maintained part_count
# counter (initialised to -1 and incremented at the top of the loop).
for part_count, part in enumerate(parts):
    print(f'len(part.images): {len(part.images)}')
    part.save_video(save_path=f'part{part_count}.avi',
                    fps=5,
                    show_details=True)
Beispiel #8
0
from annotation_utils.coco.structs import COCO_Dataset

# Load the two validation splits of the traffic-light dataset, merge them
# into one dataset, and save the combined result.
_split_root = '/home/clayton/workspace/prj/data_keep/data/dataset/traffic_light/split'
dataset0, dataset1 = (
    COCO_Dataset.load_from_path(f'{_split_root}/val{i}/coco/output.json',
                                img_dir=f'{_split_root}/val{i}/img')
    for i in range(2))

combined_dataset = COCO_Dataset.combine([dataset0, dataset1])
combined_dataset.save_to_path('combine_test.json', overwrite=True)
Beispiel #9
0
def complete(
    img_dir: str,
    json_path: str,
    show_preview: bool = False,
    show_image: bool = False,
    show_video: bool = False,
    show_seg: bool = False,
    kpt_idx_offset: int = 0,
):
    """Round-trip a COCO dataset non-strictly and visualise the result.

    Loads json_path, re-saves it as 'non_strict_dataset.json', reloads that
    file with images sorted by file name, then optionally writes a
    visualisation image dump and/or an mp4 video named after img_dir.

    Args:
        img_dir: Used as the prefix for the visualisation output paths.
        json_path: COCO annotation JSON to load.
        show_preview: Show a preview window while rendering.
        show_image: Write a visualisation image dump.
        show_video: Write a visualisation video.
        show_seg: Draw segmentations in the outputs.
        kpt_idx_offset: Offset applied to keypoint indices when drawing.
    """
    logger.yellow("Visualisation starts")
    strict = False
    # Fix: json.load(open(json_path, 'r')) never closed its file handle;
    # the context manager guarantees the file is released.
    with open(json_path, 'r') as f:
        json_dict = json.load(f)
    dataset = COCO_Dataset.from_dict(json_dict, strict=strict)
    dataset.save_to_path('non_strict_dataset.json',
                         overwrite=True,
                         strict=False)
    dataset0 = COCO_Dataset.load_from_path('non_strict_dataset.json',
                                           strict=False)
    dataset0.images.sort(attr_name='file_name')
    if show_image:
        # dt_string3 is presumably a module-level timestamp — TODO confirm.
        dataset0.save_visualization(
            save_dir=f'{img_dir}_{dt_string3}_visualize',
            show_preview=show_preview,
            kpt_idx_offset=kpt_idx_offset,
            overwrite=True,
            show_details=True,
            show_seg=show_seg)
    if show_video:
        dataset0.save_video(
            save_path=f'{img_dir}_{dt_string3}.mp4',
            show_details=True,
            show_preview=show_preview,
            kpt_idx_offset=kpt_idx_offset,
            overwrite=True,
            fps=5,
            show_seg=show_seg)
    logger.green('Visualisation complete')
from annotation_utils.coco.structs import COCO_Dataset

# Keep only the 'measure' category and preview the result.
dataset = COCO_Dataset.load_from_path('measure_total_test.json')

dataset.remove_all_categories_except(['measure'])

dataset.display_preview(show_details=True)
Beispiel #11
0
from annotation_utils.coco.structs import COCO_Dataset

# Load a simulated HSR dataset, sort its images by file name, and write a
# visualisation image dump to 'test_vis' with a preview window.
dataset = COCO_Dataset.load_from_path(
    json_path=
    '/home/clayton/workspace/prj/data_keep/data/toyota/dataset/sim/20200228/28_02_2020_11_18_30_coco-data/HSR-coco.json',
    img_dir=
    '/home/clayton/workspace/prj/data_keep/data/toyota/dataset/sim/20200228/28_02_2020_11_18_30_coco-data'
)
dataset.images.sort(attr_name='file_name')
dataset.save_visualization(save_dir='test_vis',
                           show_preview=True,
                           kpt_idx_offset=-1,  # presumably 1-based keypoints in the source — TODO confirm
                           overwrite=True)
Beispiel #12
0
from annotation_utils.coco.structs import COCO_Dataset

# Load the hook dataset, enlarge every bounding box by 30% about its
# center (clamped to the owning image's frame), save, and preview.
dataset = COCO_Dataset.load_from_path(
    json_path=
    '/home/clayton/workspace/prj/data_keep/data/sekisui/hook/coco/output.json',
    img_dir='/home/clayton/workspace/prj/data_keep/data/sekisui/hook/img')

for img in dataset.images:
    for ann in dataset.annotations.get_annotations_from_imgIds([img.id]):
        ann.bbox = ann.bbox.scale_about_center(
            scale_factor=1.3, frame_shape=[img.height, img.width])

dataset.save_to_path('bbox_resized.json', overwrite=True)
dataset.display_preview(show_details=True, kpt_idx_offset=-1)
Beispiel #13
0
from annotation_utils.coco.structs import COCO_Dataset
from logger import logger
from common_utils.image_utils import concat_n_images
from common_utils.cv_drawing_utils import cv_simple_image_viewer
from common_utils.file_utils import file_exists
import cv2
from common_utils.common_types.keypoint import Keypoint2D_List, Keypoint2D

from imageaug import AugHandler, Augmenter as aug

# Load an Unreal-generated HSR COCO dataset for augmentation experiments.
dataset = COCO_Dataset.load_from_path(
    json_path=
    '/Users/darwinharianto/Desktop/hayashida/Unreal/18_03_2020_18_03_10_coco-data/HSR-coco.json',
    img_dir=
    '/Users/darwinharianto/Desktop/hayashida/Unreal/18_03_2020_18_03_10_coco-data'
)

# Augmentation configs are cached on disk: created and saved on the first
# run, loaded from the save file afterwards.
resize_save_path = 'test_resize.json'
handler_save_path = 'test_handler.json'
if not file_exists(resize_save_path):
    resize = aug.Resize(width=500, height=500)
    resize.save_to_path(save_path=resize_save_path, overwrite=True)
    logger.info(f'Created new Resize save.')
else:
    resize = aug.Resize.load_from_path(resize_save_path)
    logger.info(f'Loaded Resize from save.')
if not file_exists(handler_save_path):
    handler = AugHandler([
        aug.Crop(percent=[0.2, 0.5]),
        aug.Flipud(p=0.5),
        aug.Superpixels()
from annotation_utils.coco.structs import COCO_Dataset

# Load a simulated coco-data dataset, enlarge every bbox by 40% about its
# center, then export the dataset to labelme format (bbox takes priority).
dataset = COCO_Dataset.load_from_path(
    json_path=
    '/home/clayton/workspace/prj/data_keep/data/toyota/dataset/sim/20200122/coco-data/new_HSR-coco.json',
    img_dir=
    '/home/clayton/workspace/prj/data_keep/data/toyota/dataset/sim/20200122/coco-data'
)

for coco_ann in dataset.annotations:
    # Look up the image that owns this annotation for the clamping frame.
    coco_image = dataset.images.get_images_from_imgIds([coco_ann.image_id])[0]
    coco_ann.bbox = coco_ann.bbox.scale_about_center(
        scale_factor=1.4, frame_shape=[coco_image.height, coco_image.width])

# dataset.display_preview()
labelme_handler = dataset.to_labelme(priority='bbox')

labelme_handler.save_to_dir(
    json_save_dir='test_json',
    src_img_dir=
    '/home/clayton/workspace/prj/data_keep/data/toyota/dataset/sim/20200122/coco-data',
    dst_img_dir='test_img')
Beispiel #15
0
from annotation_utils.coco.structs import COCO_Dataset
from common_utils.file_utils import file_exists

# Build the preview dataset once and cache it on disk; subsequent runs load
# the cached JSON instead of re-combining from the YAML config.
ann_save_path = 'preview_dataset.json'
if file_exists(ann_save_path):
    dataset = COCO_Dataset.load_from_path(json_path=ann_save_path)
else:
    dataset = COCO_Dataset.combine_from_config(
        config_path=
        '/home/clayton/workspace/prj/data_keep/data/toyota/dataset/config/yaml/box_hsr_kpt_trainval.yaml',
        img_sort_attr_name='file_name',
        show_pbar=True)
    dataset.save_to_path(ann_save_path)
dataset.display_preview(kpt_idx_offset=-1, show_details=True)
Beispiel #16
0
from common_utils.file_utils import make_dir_if_not_exists
from annotation_utils.coco.structs import COCO_Dataset

src_img_dir = '/home/clayton/workspace/prj/data_keep/data/dataset/bird/img'
dst_dataset_root_dir = '/home/clayton/workspace/prj/data_keep/data/dataset/bird/dataset_root_dir'
make_dir_if_not_exists(dst_dataset_root_dir)

dataset = COCO_Dataset.load_from_path(  # 18 images -> 2 scenarios x 3 datasets / scenario x 3 images / dataset -> 2 train, 1 val
    json_path=f'{src_img_dir}/output.json',
    img_dir=src_img_dir)
# NOTE(review): scenario_names is built but never read in this view.
scenario_names = [f'scenario{i}' for i in range(2)]
scenario_datasets = dataset.split_into_parts(ratio=[9, 9], shuffle=True)
# Split 18 images into 2 scenarios of 9 images; each scenario into 3 parts
# of 3 images. (range(len(...)) loops could be enumerate — left as-is.)
for i in range(len(scenario_datasets)):
    scenario_name = f'scenario{i}'
    dst_scenario_dir = f'{dst_dataset_root_dir}/{scenario_name}'
    make_dir_if_not_exists(dst_scenario_dir)
    part_datasets = scenario_datasets[i].split_into_parts(ratio=[3, 3, 3],
                                                          shuffle=True)
    for j in range(len(part_datasets)):
        part_name = f'part{j}'
        dst_part_dir = f'{dst_scenario_dir}/{part_name}'
        make_dir_if_not_exists(dst_part_dir)
        # Move the part's images into its directory and rewrite image paths.
        part_datasets[j].move_images(dst_img_dir=dst_part_dir,
                                     preserve_filenames=True,
                                     update_img_paths=True,
                                     overwrite=True,
                                     show_pbar=False)
        part_datasets[j].save_to_path(save_path=f'{dst_part_dir}/output.json',
                                      overwrite=True)
        part_datasets[j].save_video(save_path=f'{dst_part_dir}/preview.avi',
                                    fps=5,
import cv2,os
from common_utils.common_types.keypoint import Keypoint2D_List, Keypoint2D
from common_utils.common_types.segmentation import Segmentation, Polygon
import shapely
from shapely import ops
import numpy as np
from common_utils.cv_drawing_utils import draw_keypoints, cv_simple_image_viewer, draw_bbox, draw_segmentation

from imageaug import AugHandler, Augmenter as aug

# Output directory for visualised augmented images.
save_img_path = "/home/pasonatech/detectron/detectron2/gbox/vis_image/"

dataset = COCO_Dataset.load_from_path(
    
    json_path='/home/pasonatech/combined_cocooutput/HSR-coco.json',
    img_dir='/home/pasonatech/combined_cocooutput'

    #json_path='/home/pasonatech/aug_real_combine/aug_sim_com_garbage/HSR-coco.json',
    #img_dir='/home/pasonatech/aug_real_combine/aug_sim_com_garbage'
)

# Build the augmentation handler on first run only.
# NOTE(review): when handler_save_path exists, `handler` is never assigned
# in this view — presumably loaded from the save file elsewhere; verify.
resize_save_path = 'test_resize.json'
handler_save_path = 'test_handler.json'
if not file_exists(handler_save_path):
    handler = AugHandler(
        [
            aug.Crop(percent=[0.2, 0.5]),
            aug.Flipud(p=0.5),
            aug.Superpixels()
            # aug.Sharpen(alpha=[-1,0.1], lightness=[0,3])
        ]
    )
Beispiel #18
0
from logger import logger
from annotation_utils.coco.structs import COCO_Dataset

# Prune low-keypoint annotations, relocate images, save, and preview.
dataset = COCO_Dataset.load_from_path(
    json_path='bk_28_02_2020_11_18_30_coco-data/HSR-coco.json',
    img_dir='bk_28_02_2020_11_18_30_coco-data')
# Flag0/1/2 logs track handler sizes before/after each mutating step.
logger.purple(
    f'Flag0 len(dataset.images): {len(dataset.images)}, len(dataset.annotations): {len(dataset.annotations)}'
)
# Drops annotations with fewer than 11 keypoints — whether empty images are
# also dropped is up to prune_keypoints; TODO confirm.
dataset.prune_keypoints(min_num_kpts=11, verbose=True)
logger.purple(
    f'Flag1 len(dataset.images): {len(dataset.images)}, len(dataset.annotations): {len(dataset.annotations)}'
)
dataset.move_images(dst_img_dir='test_img',
                    preserve_filenames=True,
                    update_img_paths=True,
                    overwrite=True,
                    show_pbar=True)
logger.purple(
    f'Flag2 len(dataset.images): {len(dataset.images)}, len(dataset.annotations): {len(dataset.annotations)}'
)
dataset.save_to_path(save_path='prune_test.json', overwrite=True)
dataset.display_preview(kpt_idx_offset=-1)
Beispiel #19
0
    'pvnet-darwin20210105-epoch599',
    'pvnet-darwin20210105-epoch699',
    'pvnet-darwin20210105-epoch799',
]
test_root_dir = '/home/clayton/workspace/prj/data_keep/data/toyota/from_toyota/20201017/20201017_robot_camera'
# Each CSV under the test root marks one test case; the sibling
# images/output.json (when present) is loaded as its COCO dataset.
csv_paths = recursively_get_all_filepaths_of_extension(test_root_dir,
                                                       extension='csv')
test_names, datasets = [], []
for csv_path in csv_paths:
    test_name = get_rootname_from_path(csv_path)
    img_dir = f'{get_dirpath_from_filepath(csv_path)}/images'
    assert dir_exists(img_dir), f"Couldn't find image directory: {img_dir}"
    ann_path = f'{img_dir}/output.json'
    # Tests without an annotation file are skipped silently.
    if not file_exists(ann_path):
        continue
    dataset = COCO_Dataset.load_from_path(ann_path, img_dir=img_dir)
    test_names.append(test_name)
    datasets.append(dataset)

linemod_dataset = Linemod_Dataset.load_from_path(
    f'/home/clayton/workspace/prj/data/misc_dataset/darwin_datasets/coco2linemod/darwin20210105_blackout/train.json'
)
# 3D keypoint set = FPS keypoints plus the object center, from the first
# Linemod annotation.
linemod_ann_sample = linemod_dataset.annotations[0]
kpt_3d = linemod_ann_sample.fps_3d.copy()
kpt_3d.append(linemod_ann_sample.center_3d)
corner_3d = linemod_ann_sample.corner_3d
# K = linemod_ann_sample.K
# Hard-coded 3x3 camera intrinsics (fx 0 cx / 0 fy cy / 0 0 1), overriding
# the per-annotation K above.
K = np.array([
    517.799858, 0.000000, 303.876287, 0.000000, 514.807834, 238.157119,
    0.000000, 0.000000, 1.000000
]).reshape(3, 3)
Beispiel #20
0
                obj_type, obj_name, instance_name = 'seg', '90part0', '0'
                ann_obj.class_name = f'{obj_type}_{obj_name}_{instance_name}'

    # Convert To COCO Dataset
    dataset = COCO_Dataset.from_ndds(
        ndds_dataset=ndds_dataset,
        categories=COCO_Category_Handler.load_from_path(
            '/home/clayton/workspace/prj/data_keep/data/ndds/categories/measure_all.json'
        ),
        naming_rule='type_object_instance_contained',
        delimiter='_',
        ignore_unspecified_categories=True,
        show_pbar=True,
        bbox_area_threshold=1,
        exclude_invalid_polygons=True)
    dataset.save_to_path(result_json)
else:
    dataset = COCO_Dataset.load_from_path(result_json)

# Drop the 'measure' category, sort images, and dump a detailed
# visualization with thin green bbox labels and solid segmentation fills.
dataset.remove_categories_by_name(category_names=['measure'])
dataset.images.sort(attr_name='file_name')
dataset.save_visualization(save_dir='measure_test_vis',
                           preserve_filenames=True,
                           show_details=True,
                           bbox_thickness=1,
                           bbox_label_thickness=1,
                           bbox_label_color=[0, 255, 0],
                           bbox_label_orientation='right',
                           show_seg=True,
                           seg_color=[0, 0, 255],  # channel order (BGR vs RGB) — TODO confirm
                           seg_transparent=False)
Beispiel #21
0
from common_utils.common_types.keypoint import Keypoint2D_List, Keypoint2D
# import printj
from imageaug import AugHandler, Augmenter as aug
from random import choice
from tqdm import tqdm
# import imgaug.augmenters as iaa

# PATH='/home/jitesh/3d/data/coco_data/mp_200_23_04_2020_15_37_00_coco-data'
# path = '/home/jitesh/3d/data/coco_data/sample_measure_coco_data'
# path = '/home/jitesh/3d/data/coco_data/measure_combined7'
# dest_folder_img_combined = f'{path}/img'
# dest_json_file_combined = f'{path}/json/measure-only.json'
# Source images + annotations for the augmentation visualisation run.
path = '/home/pasonatech/labelme/ndds2coco/6_22/bolt_mark/type4'
dest_folder_img_combined = f'{path}'
dest_json_file_combined = f'{path}/HSR-coco.json'
dataset = COCO_Dataset.load_from_path(json_path=dest_json_file_combined,
                                      img_dir=dest_folder_img_combined)
output = f'{path}/aug_vis'
make_dir_if_not_exists(output)
# NOTE(review): aliases the Augmenter import under the imgaug-style name.
iaa = aug
# resize_save_path = 'test_resize.json'
handler_save_path = 'test_handler.json'
# if not file_exists(resize_save_path):
#     resize = aug.Resize(width=500, height=500)
#     resize.save_to_path(save_path=resize_save_path, overwrite=True)
#     logger.info(f'Created new Resize save.')
# else:
#     resize = aug.Resize.load_from_path(resize_save_path)
#     logger.info(f'Loaded Resize from save.')
# if not file_exists(handler_save_path):
#     handler = AugHandler(
#         [
Beispiel #22
0
from annotation_utils.coco.structs import COCO_Dataset

# Load annotations only; check_paths=False skips verifying that the
# referenced image files exist on disk.
dataset = COCO_Dataset.load_from_path(
    json_path=
    '/home/clayton/workspace/prj/data_keep/data/toyota/dataset/sim/20200228/28_02_2020_11_18_30_coco-data/HSR-coco.json',  # Modify this path
    check_paths=False)

dataset.print_handler_lengths()
from annotation_utils.coco.structs import COCO_Dataset

# Render the measure dataset as an annotated video at 3 fps.
dataset = COCO_Dataset.load_from_path('measure_coco/measure/output.json')
dataset.save_video(save_path='merged_mask_measure_viz.mp4',
                   show_details=True,
                   fps=3)
Beispiel #24
0
def infer(
    path: str,
    weights_path: str,
    thresh: int = 0.5,
    key: str = 'R',
    infer_dump_dir: str = '',
    model: str = 'mask_rcnn_R_50_FPN_1x',
    size: int = 1024,
    class_names: List[str] = ['hook'],
    gt_path:
    str = '/home/jitesh/3d/data/coco_data/hook_test/json/cropped_hook.json'):
    # class_names=['hook', 'pole']
    # class_names=['hook']
    conf_thresh = 0.001
    show_bbox_border = True
    gt_dataset = COCO_Dataset.load_from_path(json_path=gt_path)
    inferer_seg = inferer(
        weights_path=weights_path,
        confidence_threshold=0.1,
        # num_classes=1,
        # num_classes=2,
        class_names=class_names,
        # class_names=['hook'],
        model='keypoint_rcnn_R_50_FPN_1x',
        # model='faster_rcnn_X_101_32x8d_FPN_3x',
        # model='faster_rcnn_R_101_FPN_3x',
        # model=model,
    )
    inferer_seg.cfg.INPUT.MIN_SIZE_TEST = size
    inferer_seg.cfg.INPUT.MAX_SIZE_TEST = size
    inferer_seg.cfg.MODEL.MASK_ON = True

    weights_path = '/home/jitesh/3d/data/coco_data/hook_sim_real_data7/weights/Keypoints_R_50_1x_aug_cm_seg_val_1/model_0009999.pth'
    weights_path = '/home/jitesh/3d/data/coco_data/hook_sim_real_data7_0.1/weights/Keypoints_R_50_1x_aug_cm_seg_val_3/model_0009999.pth'
    weights_path = '/home/jitesh/3d/data/coco_data/hook_sim_real_data7_0.1/weights/Keypoints_R_50_1x_aug_cm_seg_val_1/model_0007999.pth'
    weights_path = '/home/jitesh/3d/data/coco_data/hook_sim_real_data8/weights/Keypoints_R_50_1x_aug_key_seg_val_1/model_0009999.pth'
    weights_path = '/home/jitesh/3d/data/coco_data/hook_sim_real_data8/weights/Keypoints_R_50_1x_aug_key_seg_val_2/model_0004999.pth'
    # inferer_key = jDetectron2KeypointInferer(
    #     weights_path=weights_path,
    #     # ref_coco_ann_path=f'/home/jitesh/3d/data/coco_data/hook_real1/json/hook.json',
    #     # categories_path=f'/home/jitesh/3d/data/categories/hook_infer.json',
    #     # categories_path=f'/home/jitesh/3d/data/categories/hook_7ckpt.json',
    #     categories_path=f'/home/jitesh/3d/data/categories/hook_7ckpt_pole.json',
    #     target_category='hook',
    #     model_name='keypoint_rcnn_R_50_FPN_1x',
    #     bbox_threshold=bbox_thresh,
    #     kpt_threshold=kpt_thresh,
    #     key_box='hook',
    # )
    # k_size = 1024
    # inferer_key.cfg.INPUT.MIN_SIZE_TEST = k_size
    # inferer_key.cfg.INPUT.MAX_SIZE_TEST = k_size

    possible_modes = ['save', 'preview']
    mode = 'save'
    check_value(mode, valid_value_list=possible_modes)
    # make_dir_if_not_exists(infer_dump_dir)
    img_extensions = ['jpg', 'JPG', 'png', 'PNG']
    img_pathlist = get_all_files_in_extension_list(
        dir_path=f'{path}', extension_list=img_extensions)
    img_pathlist.sort()

    confirm_folder(infer_dump_dir, mode)
    # confirm_folder(f'{infer_dump_dir}/good_seg', mode)
    # confirm_folder(f'{infer_dump_dir}/good_cropped', mode)
    # confirm_folder(f'{infer_dump_dir}/good', mode)
    # confirm_folder(f'{infer_dump_dir}/G(>4D) P(>4D)', mode)
    # confirm_folder(f'{infer_dump_dir}/G(>4D) P(<4D)', mode)
    # confirm_folder(f'{infer_dump_dir}/G(<4D) P(>4D)', mode)
    # confirm_folder(f'{infer_dump_dir}/G(<4D) P(<4D)', mode)
    # confirm_folder(f'{infer_dump_dir}/bad', mode)
    confirm_folder(f'{infer_dump_dir}/infer_key_seg', mode)

    count = 0
    start = datetime.now()
    df = pd.DataFrame(data=[],
                      columns=[
                          'gt_d',
                          'pred_d',
                          'gt_ab',
                          'pred_ab',
                          'gt_ratio',
                          'pred_ratio',
                          'gt_ratio>4',
                          'pred_ratio>4',
                          'correct_above4d_ratio',
                          'incorrect_above4d_ratio',
                          'correct_below4d_ratio',
                          'incorrect_below4d_ratio',
                      ])
    #  'image_path'])
    for i, img_path in enumerate(tqdm(
            img_pathlist,
            desc='Writing images',
    )):
        img_filename = get_filename(img_path)
        # if not '201005_70_縮小革命PB020261.jpg' in img_path:
        #     continue
        # if i > 19:
        #     continue
        printj.purple(img_path)
        img = cv2.imread(img_path)
        result = img
        # print(f'shape {img.shape}')
        # cv2.imshow('i', img)
        # cv2.waitKey(100000)
        # continue
        score_list, pred_class_list, bbox_list, pred_masks_list, pred_keypoints_list, vis_keypoints_list, kpt_confidences_list = inferer_seg.predict(
            img=img)
        # printj.blue(pred_masks_list)
        max_hook_score = -1
        max_pole_score = -1
        diameter = -1
        len_ab = -1
        found_hook = False
        found_pole = False
        for score, pred_class, bbox, mask, keypoints, vis_keypoints, kpt_confidences in zip(
                score_list, pred_class_list, bbox_list, pred_masks_list,
                pred_keypoints_list, vis_keypoints_list, kpt_confidences_list):

            if pred_class == 'pole':
                found_pole = True
                if max_pole_score < score:
                    # if True:
                    max_pole_score = score
                    diameter = compute_diameter(mask)
                    # result = draw_bool_mask(img=result, mask=mask, color=[
                    #                     0, 255, 255],
                    #                     transparent=True
                    #                     )
                    pole_bbox_text = f'pole {str(round(score, 2))}'
                    pole_bbox = bbox
                    pole_mask = mask
                    # result = draw_bbox(img=result, bbox=bbox,
                    #                    text=pole_bbox_text, label_only=not show_bbox_border, label_orientation='bottom')
                    printj.blue(f'diameter={diameter}')
            if pred_class == 'hook':
                # printj.green.bold_on_yellow(score)
                found_hook = True
                if max_hook_score < score:
                    # if True:
                    max_hook_score = score
                    hook_bbox = BBox.buffer(bbox)
                    hook_score = round(score, 2)
                    hook_mask = mask
                    hook_keypoints = keypoints
                    hook_vis_keypoints = vis_keypoints
                    hook_kpt_confidences = kpt_confidences
                    # NOTE(review): the commented-out block below is dead
                    # experiment code (bbox padding / early debug drawing)
                    # kept for reference; consider deleting it.
                    # xmin, ymin, xmax, ymax = bbox.to_int().to_list()
                    # _xmin, _ymin, _xmax, _ymax = _bbox.to_int().to_list()
                    # width = _xmax-_xmin
                    # height = _ymax-_ymin
                    # scale = 0.2
                    # xmin = max(int(_xmin - width*scale), 0)
                    # xmax = min(int(_xmax + width*scale), img.shape[1])
                    # ymin = max(int(_ymin - height*scale), 0)
                    # ymax = min(int(_ymax + height*scale), img.shape[0])

                    # printj.red(score)
                    # printj.red(bbox)
                    # return
                    # img = draw_bbox(img=img, bbox=_bbox, color=[
                    #                 0, 255, 255], thickness=2, text=f"{pred_class} {round(score, 3)}",
                    #                 label_orientation='top')
                    # img = draw_bbox(img=img, bbox=_bbox, color=[
                    #                 0, 255, 255], thickness=2, text=f"{pred_class} {round(score, 3)}",
                    #                 label_orientation='bottom')
                    # result = draw_bool_mask(img=result, mask=mask, color=[
                    #     255, 255, 0],
                    #     transparent=True
                    # )
                    # result = result
                    # bbox_text = str(round(score, 4))
                    # result = draw_bbox(img=result, bbox=bbox,
                    #                    text=bbox_text, label_only=not show_bbox_border)
                    # Label style passed to draw_inference_on_hook2 below.
                    bbox_label_mode = 'euler'
                    # result = draw_keypoints(
                    #     img=result, keypoints=vis_keypoints, radius=2, color=[0, 0, 255],
                    #     # keypoint_labels=kpt_labels, show_keypoints_labels=True, label_thickness=1,
                    #     # ignore_kpt_idx=conf_idx_list
                    #     )
                    # Hard-coded 7-keypoint naming for the hook model; the
                    # skeleton chains consecutive keypoints (a-b-cb-c-cd-d-e).
                    kpt_labels = [
                        "kpt-a", "kpt-b", "kpt-cb", "kpt-c", "kpt-cd", "kpt-d",
                        "kpt-e"
                    ]
                    kpt_skeleton = [[0, 1], [1, 2], [2, 3], [3, 4], [4, 5],
                                    [5, 6]]
                    # Split keypoint indices into confident (> conf_thresh)
                    # and non-confident (<= conf_thresh) flat index arrays.
                    conf_idx_list = np.argwhere(
                        np.array(kpt_confidences) > conf_thresh).reshape(-1)
                    not_conf_idx_list = np.argwhere(
                        np.array(kpt_confidences) <= conf_thresh).reshape(
                            -1).astype(int)
                    # Fancy-index keypoints/labels by the confidence split.
                    conf_keypoints, conf_kpt_labels = np.array(vis_keypoints)[
                        conf_idx_list], np.array(kpt_labels)[conf_idx_list]
                    not_conf_keypoints, not_conf_kpt_labels = np.array(
                        vis_keypoints)[not_conf_idx_list], np.array(
                            kpt_labels)[not_conf_idx_list]
                    # Float32 working copy, presumably mutated later by
                    # draw_inference_on_hook2 — TODO confirm.
                    cleaned_keypoints = np.array(vis_keypoints.copy()).astype(
                        np.float32)
                    # result = draw_bool_mask(img=result, mask=mask, color=[
                    #     255, 255, 0],
                    #     transparent=True
                    # )
                    # result, len_ab = draw_inference_on_hook2(img=result, cleaned_keypoints=cleaned_keypoints, kpt_labels=kpt_labels, kpt_skeleton=kpt_skeleton,
                    #                                         score=score, bbox=_bbox, vis_keypoints=vis_keypoints, kpt_confidences=kpt_confidences, conf_idx_list=conf_idx_list, not_conf_idx_list=not_conf_idx_list,
                    #                                         conf_keypoints=conf_keypoints, conf_kpt_labels=conf_kpt_labels, not_conf_keypoints=not_conf_keypoints, not_conf_kpt_labels=not_conf_kpt_labels,
                    #                                         conf_thresh=conf_thresh, show_bbox_border=show_bbox_border, bbox_label_mode=bbox_label_mode, index_offset=0, diameter=diameter)
                    # result=result
                    # printj.green(_bbox)
                    # printj.green(_bbox.to_int())
                    # printj.green(_bbox.to_int().to_list())
        # Debug console output of the best hook score for this image.
        printj.green.on_white(max_hook_score)
        # Overlay the best pole detection: translucent cyan mask plus the
        # same bbox label drawn twice (above and below the box).
        if found_pole:
            result = draw_bool_mask(img=result,
                                    mask=pole_mask,
                                    color=[0, 255, 255],
                                    transparent=True)
            result = draw_bbox(img=result,
                               bbox=pole_bbox,
                               text=pole_bbox_text,
                               label_only=not show_bbox_border,
                               label_orientation='top')
            result = draw_bbox(img=result,
                               bbox=pole_bbox,
                               text=pole_bbox_text,
                               label_only=not show_bbox_border,
                               label_orientation='bottom')
        # Overlay the best hook detection and draw its keypoint inference.
        # NOTE(review): kpt_labels/kpt_skeleton/conf_idx_list/... and
        # cleaned_keypoints are assigned inside the detection loop above —
        # if that loop body never ran, this raises NameError. Verify they
        # are initialized before the loop, or guard accordingly.
        if found_hook:
            result = draw_bool_mask(img=result,
                                    mask=hook_mask,
                                    color=[255, 255, 0],
                                    transparent=True)
            result, len_ab = draw_inference_on_hook2(
                img=result,
                cleaned_keypoints=cleaned_keypoints,
                kpt_labels=kpt_labels,
                kpt_skeleton=kpt_skeleton,
                score=hook_score,
                bbox=hook_bbox,
                vis_keypoints=hook_vis_keypoints,
                kpt_confidences=hook_kpt_confidences,
                conf_idx_list=conf_idx_list,
                not_conf_idx_list=not_conf_idx_list,
                conf_keypoints=conf_keypoints,
                conf_kpt_labels=conf_kpt_labels,
                not_conf_keypoints=not_conf_keypoints,
                not_conf_kpt_labels=not_conf_kpt_labels,
                conf_thresh=conf_thresh,
                show_bbox_border=show_bbox_border,
                bbox_label_mode=bbox_label_mode,
                index_offset=0,
                diameter=diameter)
        # NOTE(review): len_ab is only assigned inside `if found_hook:` —
        # when found_hook is False this likely raises NameError unless
        # len_ab is initialized earlier in the function. TODO confirm.
        printj.purple(len_ab)
        if len_ab == 0:
            printj.green(keypoints)
        # Stamp the measured length / diameter info box onto the frame.
        result = draw_info_box(result, len_ab, diameter)
        #                 img: np.ndarray, cleaned_keypoints, kpt_labels: List[str], kpt_skeleton: List[list],
        # score: float, bbox: BBox, vis_keypoints: list, kpt_confidences: list, conf_idx_list: list, not_conf_idx_list: list,
        # conf_keypoints, conf_kpt_labels, not_conf_keypoints, not_conf_kpt_labels,
        # conf_thresh: float = 0.3, show_bbox_border: bool = False, bbox_label_mode: str = 'euler', index_offset: int = 0, diameter=1

        # cv2.imshow('i', result)
        # # cv2.imwrite('i', result)
        # cv2.waitKey(10000)
        # quit_flag = cv_simple_image_viewer(img=result, preview_width=1000)
        # if quit_flag:
        #     break

        # cv2.imwrite(f"{infer_dump_dir}/good_seg/{img_filename}", result)
        # Persist the annotated frame to the inference dump directory.
        cv2.imwrite(f"{infer_dump_dir}/infer_key_seg/{img_filename}", result)