Example #1
    def init_save_dir(self):
        make_dir_if_not_exists(self._save_dir)
        if self._clear:
            delete_all_files_in_dir(self._save_dir, ask_permission=False)
        else:
            self._existing_extensions = list(set([
                get_extension_from_path(path)
                for path in get_valid_image_paths(self._save_dir)
            ]))
        self._first = False
Example #2
    def save_to_dir(self,
                    json_save_dir: str,
                    src_img_dir: str,
                    dst_img_dir: str = None,
                    overwrite: bool = False,
                    show_pbar: bool = False):
        """Saves NDDS_Frame_Handler object to a directory path.

        Arguments:
            json_save_dir {str} -- [Path to directory where you want to save the NDDS annotation json files.]
            src_img_dir {str} -- [Path to directory where the original NDDS images are saved.]

        Keyword Arguments:
            dst_img_dir {str} -- [Path to directory where you want to copy the original NDDS images.] (default: {None})
            overwrite {bool} -- [Whether or not you would like to overwrite existing files/directories.] (default: {False})
            show_pbar {bool} -- [Whether or not you would like to show the progress bar.] (default: {False})
        """
        self._check_paths_valid(src_img_dir=src_img_dir)
        make_dir_if_not_exists(json_save_dir)
        delete_all_files_in_dir(json_save_dir, ask_permission=not overwrite)
        if dst_img_dir is not None:
            make_dir_if_not_exists(dst_img_dir)
            delete_all_files_in_dir(dst_img_dir, ask_permission=not overwrite)

        if show_pbar:
            pbar = tqdm(total=len(self), unit='ann(s)', leave=True)
            pbar.set_description(f'Saving {self.__class__.__name__}')
        for frame in self:
            save_path = f'{json_save_dir}/{get_rootname_from_path(frame.img_path)}.json'
            if dst_img_dir is not None:
                copy_file(
                    src_path=f'{src_img_dir}/{get_filename(frame.img_path)}',
                    dest_path=f'{dst_img_dir}/{get_filename(frame.img_path)}',
                    silent=True)
                if frame.cs_img_path:
                    copy_file(
                        src_path=f'{src_img_dir}/{get_filename(frame.cs_img_path)}',
                        dest_path=f'{dst_img_dir}/{get_filename(frame.cs_img_path)}',
                        silent=True)
                if frame.depth_img_path:
                    copy_file(
                        src_path=f'{src_img_dir}/{get_filename(frame.depth_img_path)}',
                        dest_path=f'{dst_img_dir}/{get_filename(frame.depth_img_path)}',
                        silent=True)
                if frame.is_img_path:
                    copy_file(
                        src_path=f'{src_img_dir}/{get_filename(frame.is_img_path)}',
                        dest_path=f'{dst_img_dir}/{get_filename(frame.is_img_path)}',
                        silent=True)
            # The annotation JSON is written regardless of whether the images are copied.
            frame.ndds_ann.save_to_path(save_path=save_path)
            if show_pbar:
                pbar.update()
        if show_pbar:
            pbar.close()
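
A minimal usage sketch of the method above, assuming `handler` is an already-populated NDDS_Frame_Handler instance; the directory paths are placeholders:

# Hypothetical usage; all paths below are placeholder assumptions.
handler.save_to_dir(
    json_save_dir='ndds_ann_dump',      # annotation JSON files are written here
    src_img_dir='ndds_output',          # directory containing the original NDDS images
    dst_img_dir='ndds_ann_dump/img',    # images are copied here because it is not None
    overwrite=True,                     # delete existing files without asking for permission
    show_pbar=True)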
Example #3
    def setup_directories(self, verbose: bool = False):
        make_dir_if_not_exists(self.dest_dir)
        for target_dir in [self.train_dir, self.test_dir, self.val_dir]:
            if dir_exists(target_dir):
                delete_all_files_in_dir(dir_path=target_dir, ask_permission=False, verbose=verbose)
            else:
                make_dir(target_dir)
            img_dir = f"{target_dir}/{self.target_img_dir}"
            ann_dir = f"{target_dir}/{self.target_ann_dir}"
            make_dir(img_dir)
            make_dir(ann_dir)
Example #4
    def save_to_dir(self,
                    json_save_dir: str,
                    src_img_dir: str,
                    overwrite: bool = False,
                    dst_img_dir: str = None,
                    show_pbar: bool = True):
        self._check_paths_valid(src_img_dir=src_img_dir)
        make_dir_if_not_exists(json_save_dir)
        delete_all_files_in_dir(json_save_dir, ask_permission=not overwrite)
        if dst_img_dir is not None:
            make_dir_if_not_exists(dst_img_dir)
            delete_all_files_in_dir(dst_img_dir, ask_permission=not overwrite)

        if show_pbar:
            pbar = tqdm(total=len(self), unit='ann(s)', leave=True)
            pbar.set_description(f'Saving {self.__class__.__name__}')
        for frame in self:
            save_path = f'{json_save_dir}/{get_rootname_from_path(frame.img_path)}.json'
            if dst_img_dir is not None:
                copy_file(
                    src_path=f'{src_img_dir}/{get_filename(frame.img_path)}',
                    dest_path=f'{dst_img_dir}/{get_filename(frame.img_path)}',
                    silent=True)
                if frame.cs_img_path:
                    copy_file(
                        src_path=f'{src_img_dir}/{get_filename(frame.cs_img_path)}',
                        dest_path=f'{dst_img_dir}/{get_filename(frame.cs_img_path)}',
                        silent=True)
                if frame.depth_img_path:
                    copy_file(
                        src_path=f'{src_img_dir}/{get_filename(frame.depth_img_path)}',
                        dest_path=f'{dst_img_dir}/{get_filename(frame.depth_img_path)}',
                        silent=True)
                if frame.is_img_path:
                    copy_file(
                        src_path=f'{src_img_dir}/{get_filename(frame.is_img_path)}',
                        dest_path=f'{dst_img_dir}/{get_filename(frame.is_img_path)}',
                        silent=True)
            # The annotation JSON is written regardless of whether the images are copied.
            frame.ndds_ann.save_to_path(save_path=save_path)
            if show_pbar:
                pbar.update()
        if show_pbar:
            pbar.close()
Example #5
    def save_to_dir(self, json_save_dir: str, src_img_dir: str, overwrite: bool=False, dst_img_dir: str=None):
        self._check_paths_valid(src_img_dir=src_img_dir)
        make_dir_if_not_exists(json_save_dir)
        delete_all_files_in_dir(json_save_dir, ask_permission=not overwrite)
        if dst_img_dir is not None:
            make_dir_if_not_exists(dst_img_dir)
            delete_all_files_in_dir(dst_img_dir, ask_permission=not overwrite)

        for ann in tqdm(self, total=len(self), unit='ann', leave=True):
            save_path = f'{json_save_dir}/{get_rootname_from_path(ann.img_path)}.json'
            src_img_path = f'{src_img_dir}/{get_filename(ann.img_path)}'
            if dst_img_dir is not None:
                dst_img_path = f'{dst_img_dir}/{get_filename(ann.img_path)}'
                copy_file(src_path=src_img_path, dest_path=dst_img_path, silent=True)
                ann.save_to_path(save_path=save_path, img_path=dst_img_path)
            else:
                ann.save_to_path(save_path=save_path, img_path=src_img_path)
Example #6
    def save_to_dir(self, json_save_dir: str, src_img_dir: str, overwrite: bool=False, dst_img_dir: str=None, show_pbar: bool=True):
        self._check_paths_valid(src_img_dir=src_img_dir)
        make_dir_if_not_exists(json_save_dir)
        delete_all_files_in_dir(json_save_dir, ask_permission=not overwrite)
        if dst_img_dir is not None:
            make_dir_if_not_exists(dst_img_dir)
            delete_all_files_in_dir(dst_img_dir, ask_permission=not overwrite)

        pbar = tqdm(total=len(self), unit='annotation(s)', leave=True) if show_pbar else None
        if pbar is not None:
            pbar.set_description('Writing Labelme Annotations')
        for ann in self:
            save_path = f'{json_save_dir}/{get_rootname_from_path(ann.img_path)}.json'
            src_img_path = f'{src_img_dir}/{get_filename(ann.img_path)}'
            if dst_img_dir is not None:
                dst_img_path = f'{dst_img_dir}/{get_filename(ann.img_path)}'
                copy_file(src_path=src_img_path, dest_path=dst_img_path, silent=True)
                ann.save_to_path(save_path=save_path, img_path=dst_img_path)
            else:
                ann.save_to_path(save_path=save_path, img_path=src_img_path)
            if pbar is not None:
                pbar.update()
        if pbar is not None:
            pbar.close()
Example #7
                      tag='valid'),
        DatasetConfig(img_dir=f'{data_dir}/sim/set6/img',
                      ann_path=f'{data_dir}/sim/set6/coco/output.json',
                      ann_format='coco',
                      tag='obsolete'),
        DatasetConfig(img_dir=f'{data_dir}/sim/set7/img',
                      ann_path=f'{data_dir}/sim/set7/coco/output.json',
                      ann_format='coco',
                      tag='new')
    ],
                            tag='sim'))

sim_handler = handler.filter_by_collection_tag(['sim'])
real_handler = handler.filter_by_collection_tag('real')
used_handler = handler.filter_by_dataset_tag(['new', 'valid', 'priority'])
unused_handler = handler.filter_by_dataset_tag(
    [None, 'invalid', 'obsolete', 'old'])
untagged_handler = handler.filter_by_dataset_tag(None)

config_dump_dir = 'config_dump'
make_dir_if_not_exists(config_dump_dir)
sim_handler.save_to_path(f'{config_dump_dir}/sim_datasets.json',
                         overwrite=True)
real_handler.save_to_path(f'{config_dump_dir}/real_datasets.json',
                          overwrite=True)
used_handler.save_to_path(f'{config_dump_dir}/used_datasets.json',
                          overwrite=True)
unused_handler.save_to_path(f'{config_dump_dir}/unused_datasets.json',
                            overwrite=True)
untagged_handler.save_to_path(f'{config_dump_dir}/untagged_datasets.json',
                              overwrite=True)
Example #8
from logger import logger
from annotation_utils.ndds.structs import NDDS_Dataset
from annotation_utils.coco.structs import COCO_Dataset, COCO_Category_Handler
from common_utils.file_utils import make_dir_if_not_exists, delete_all_files_in_dir

src_root_dir = '/home/doors/workspace/oobayashi/hsr_data_4k'
targets = [
    '20200924_yagura5_bright_2-colored', 'yagura5_dark_2', 'yagura5_bright_2',
    'yagura5_dark_1', '20200924_yagura5_dark_2-colored',
    '20200924_yagura5_bright_1-colored', '20200924_yagura5_dark_1-colored',
    'yagura5_bright_1'
]
dst_root_dir = '/home/doors/workspace/prj/data_keep/data/toyota/dataset/sim/20200928'
video_preview_dir = f'{dst_root_dir}/preview'
make_dir_if_not_exists(video_preview_dir)
delete_all_files_in_dir(video_preview_dir, ask_permission=False)

hsr_categories = COCO_Category_Handler.load_from_path(
    '/home/doors/workspace/prj/data_keep/data/toyota/dataset/config/categories/hsr_categories.json'
)

for target in targets:
    src_target_dir = f'{src_root_dir}/{target}'
    dst_target_dir = f'{dst_root_dir}/{target}'
    make_dir_if_not_exists(dst_target_dir)
    delete_all_files_in_dir(dst_target_dir, ask_permission=False)

    # Load NDDS Dataset
    ndds_dataset = NDDS_Dataset.load_from_dir(json_dir=src_target_dir,
                                              show_pbar=True)
Example #9
        def _wrapper_inner(*args, **kwargs):
            # Check/Adjust Parameters
            if isinstance(weight_path, (str, dict)):
                weight_paths = [weight_path]
            elif isinstance(weight_path, (tuple, list)):
                assert all([type(part) in [str, dict] for part in weight_path])
                for part in weight_path:
                    if isinstance(part, dict):
                        for key, val in part.items():
                            assert isinstance(val, str)
                weight_paths = weight_path
            else:
                raise TypeError
            if isinstance(model_name, str):
                model_names = [model_name]
            elif isinstance(model_name, (tuple, list)):
                assert all([type(part) is str for part in model_name])
                model_names = model_name
            else:
                raise TypeError
            assert len(weight_paths) == len(model_names)
            if isinstance(dataset, COCO_Dataset):
                datasets = [dataset]
            elif isinstance(dataset, (tuple, list)):
                assert all(
                    [isinstance(part, COCO_Dataset) for part in dataset])
                datasets = dataset
            else:
                raise TypeError
            if isinstance(test_name, str):
                test_names = [test_name]
            elif isinstance(test_name, (tuple, list)):
                assert all([type(part) is str for part in test_name])
                test_names = test_name
            else:
                raise TypeError
            assert len(datasets) == len(test_names)

            # Prepare Dump Directory
            if data_dump_dir is not None:
                make_dir_if_not_exists(data_dump_dir)
                # delete_all_files_in_dir(data_dump_dir, ask_permission=True)
            if video_dump_dir is not None:
                make_dir_if_not_exists(video_dump_dir)
                # delete_all_files_in_dir(video_dump_dir, ask_permission=True)
            if img_dump_dir is not None:
                make_dir_if_not_exists(img_dump_dir)
                # delete_all_files_in_dir(img_dump_dir, ask_permission=True)
            stream_writer = cast(StreamWriter, None)

            # Accumulate/Save Inference Data On Tests
            total_images = sum([len(dataset.images) for dataset in datasets])
            test_pbar = tqdm(total=total_images * len(model_names),
                             unit='image(s)',
                             leave=True) if show_pbar else None
            reserved_params = [
                'weight_path', 'model_name', 'dataset', 'test_name',
                'accumulate_pred_dump', 'stream_writer',
                'leave_stream_writer_open'
            ]
            for param in reserved_params:
                assert param not in kwargs, f'{param} already exists in kwargs'
                assert param in infer_func.__annotations__, f"{infer_func.__name__} needs to accept a {param} keyword argument to be wrapped by infer_tests_wrapper"
            for weight_path0, model_name0 in zip(weight_paths, model_names):
                video_save_path = f'{video_dump_dir}/{model_name0}.avi' if video_dump_dir is not None else None
                data_dump_save = f'{data_dump_dir}/{model_name0}.json' if data_dump_dir is not None else None
                if data_dump_save is not None and file_exists(
                        data_dump_save) and skip_if_data_dump_exists:
                    if test_pbar is not None:
                        for dataset0, test_name0 in zip(datasets, test_names):
                            test_pbar.update(len(dataset0.images))
                    continue
                if stream_writer is None:
                    stream_writer = StreamWriter(
                        show_preview=show_preview,
                        video_save_path=video_save_path,
                        dump_dir=img_dump_dir)
                elif video_save_path is not None:
                    stream_writer.video_writer._save_path = video_save_path
                if img_dump_dir is not None:
                    model_img_dump_dir = f'{img_dump_dir}/{model_name0}'
                    make_dir_if_not_exists(model_img_dump_dir)
                else:
                    model_img_dump_dir = None
                data = handler_constructor()
                assert isinstance(data, BasicLoadableHandler)
                assert hasattr(data, '__add__')
                # if video_dump_dir is not None:
                #     video_save_path = f'{video_dump_dir}/{model_name0}.avi'
                # else:
                #     video_save_path = None
                for dataset0, test_name0 in zip(datasets, test_names):
                    if test_pbar is not None:
                        test_pbar.set_description(
                            f'{model_name0} {test_name0}')
                    if img_dump_dir is not None:
                        test_img_dump_dir = f'{model_img_dump_dir}/{test_name0}'
                        make_dir_if_not_exists(test_img_dump_dir)
                        stream_writer.dump_writer._save_dir = test_img_dump_dir
                    kwargs['weight_path'] = weight_path0
                    kwargs['model_name'] = model_name0
                    kwargs['dataset'] = dataset0
                    kwargs['test_name'] = test_name0
                    kwargs['accumulate_pred_dump'] = data_dump_dir is not None
                    kwargs['stream_writer'] = stream_writer
                    kwargs['leave_stream_writer_open'] = True
                    if data_dump_dir is not None:
                        data0 = infer_func(*args, **kwargs)
                        assert isinstance(
                            data0, handler_constructor
                        ), f"Encountered dump data of type {type(data0).__name__}. Expected {handler_constructor.__name__}."
                        data += data0
                    else:
                        infer_func(*args, **kwargs)
                    if test_pbar is not None:
                        test_pbar.update(len(dataset0.images))
                if data_dump_dir is not None:
                    data.save_to_path(data_dump_save, overwrite=True)
                if stream_writer is not None and stream_writer.video_writer is not None and stream_writer.video_writer.recorder is not None:
                    stream_writer.video_writer.recorder.close()
                    stream_writer.video_writer.recorder = None
            if test_pbar is not None:
                test_pbar.close()
            if stream_writer is not None:
                del stream_writer
Example #10
from annotation_utils.ndds.structs import NDDS_Dataset
from annotation_utils.coco.structs import COCO_Category_Handler, COCO_Category
from annotation_utils.coco.structs import COCO_Dataset
from common_utils.file_utils import make_dir_if_not_exists, delete_all_files_in_dir
from typing import cast
from annotation_utils.coco.structs import COCO_Image, COCO_Annotation
from common_utils.path_utils import get_rootname_from_filename, get_extension_from_filename
from common_utils.common_types.point import Point2D
from tqdm import tqdm
import cv2
from logger import logger
from typing import List

target_src_dir = '/home/clayton/workspace/prj/data_keep/data/ndds/bolt_markMap_2020.08.18-13.03.35'
target_dst_dir = 'bolt_kpt'
make_dir_if_not_exists(target_dst_dir)
delete_all_files_in_dir(target_dst_dir)

# Load NDDS Dataset
logger.info('Loading NDDS Dataset')
ndds_dataset = NDDS_Dataset.load_from_dir(
    json_dir=target_src_dir,
    show_pbar=True
)
delete_idx_list = []


# Fix NDDS Dataset naming so that it follows convention. (This is not necessary if the NDDS dataset already follows the naming convention.)
for i, frame in enumerate(ndds_dataset.frames):
    for ann_obj in frame.ndds_ann.objects:
        if ann_obj.class_name.startswith('bolt'):
Example #11
from common_utils.file_utils import make_dir_if_not_exists, delete_all_files_in_dir
from annotation_utils.coco.structs import COCO_Dataset

dataset = COCO_Dataset.load_from_path(json_path='output.json')
# dataset.display_preview()
dump_dir = 'ann_dump'
make_dir_if_not_exists(dump_dir)
delete_all_files_in_dir(dump_dir, ask_permission=False)

dataset.info.save_to_path(f'{dump_dir}/info.json')
dataset.images.save_to_path(f'{dump_dir}/images.json')
dataset.annotations.save_to_path(f'{dump_dir}/annotations.json')
dataset.categories.save_to_path(f'{dump_dir}/categories.json')
dataset.categories.save_to_path('box_hsr_categories.json')
Example #12
def gen_infer_comparison(gt: BasicLoadableHandler,
                         dt: BasicLoadableHandler,
                         error: BasicLoadableHandler,
                         model_names: List[str],
                         test_names: List[str],
                         collage_shape: Tuple[int, int],
                         test_img_dir_map: Dict[str, str] = None,
                         model_aliases: Dict[str, str] = None,
                         test_aliases: Dict[str, str] = None,
                         video_save: str = None,
                         img_dump_dir: str = None,
                         show_preview: bool = False,
                         show_pbar: bool = True,
                         draw_settings=None,
                         draw_inference: bool = False,
                         details_func=None,
                         debug_verbose: bool = False):
    for handler in [gt, dt, error]:
        assert isinstance(handler, BasicLoadableHandler)
        for attr_key in ['frame', 'test_name']:
            assert hasattr(handler[0], attr_key)
    for handler in [dt, error]:
        assert hasattr(handler[0], 'model_name')
    model_names0 = list(set([datum.model_name for datum in dt
                             ])) if model_names == 'all' else model_names
    test_names0 = list(set([datum.test_name for datum in gt
                            ])) if test_names == 'all' else test_names
    for val_list in [model_names0, test_names0]:
        if val_list != 'all':
            assert isinstance(val_list, (tuple, list))
            for val in val_list:
                assert isinstance(val, str)
    assert isinstance(collage_shape, (tuple, list))
    for val in collage_shape:
        assert isinstance(val, int)
    assert len(collage_shape) == 2
    assert len(model_names0) <= collage_shape[0] * collage_shape[1]
    if img_dump_dir is not None:
        make_dir_if_not_exists(img_dump_dir)
        delete_all_files_in_dir(img_dump_dir, ask_permission=False)
    if test_img_dir_map is None:
        test_img_dir_map0 = {test_name: test_name for test_name in test_names0}
    else:
        assert isinstance(test_img_dir_map, dict)
        for key, val in test_img_dir_map.items():
            assert key in test_names0
            assert isinstance(key, str)
            assert isinstance(val, str)
        test_img_dir_map0 = {
            test_name: (test_img_dir_map[test_name] if test_name in test_img_dir_map else test_name) \
            for test_name in test_names0
        }
    for test_name, img_dir in test_img_dir_map0.items():
        if not dir_exists(img_dir):
            raise FileNotFoundError(f"""
                Couldn't find image directory {img_dir} for {test_name}.
                Please modify test_img_dir_map to match the image directory path for {test_name}.
                test_img_dir_map: {test_img_dir_map0}
                """)
    stream_writer = StreamWriter(show_preview=show_preview,
                                 video_save_path=video_save,
                                 dump_dir=img_dump_dir)

    total_images = len(gt.get(test_name=test_names0))
    pbar = tqdm(total=total_images, unit='image(s)',
                leave=True) if show_pbar else None
    if pbar is not None:
        pbar.set_description('Generating Comparison')
    for test_name in test_names0:
        if img_dump_dir is not None:
            test_img_dump_dir = f'{img_dump_dir}/{test_name}'
            make_dir_if_not_exists(test_img_dump_dir)
            stream_writer.dump_writer._save_dir = test_img_dump_dir

        img_dir = test_img_dir_map0[test_name]
        gt_test_data = gt.get(test_name=test_name)
        gt_test_data.sort(attr_name='frame')
        dt_test_data = dt.get(test_name=test_name)
        dt_test_data.sort(attr_name='frame')
        error_test_data = error.get(test_name=test_name)
        error_test_data.sort(attr_name='frame')

        for gt_datum in gt_test_data:
            file_name = gt_datum.frame
            img_path = f'{img_dir}/{file_name}'
            if not file_exists(img_path):
                if debug_verbose:
                    print(f"""
                        Couldn't find image. Skipping.
                            test_name: {test_name}
                            img_path: {img_path}
                        """)
                if pbar is not None:
                    pbar.update()
                continue
            img = cv2.imread(img_path)
            dt_frame_data = dt_test_data.get(frame=gt_datum.frame)
            error_frame_data = error_test_data.get(frame=gt_datum.frame)

            img_buffer = cast(List[np.ndarray], [])
            for model_name in model_names0:
                dt_model_data = dt_frame_data.get(model_name=model_name)
                dt_model_datum = dt_model_data[0] if len(
                    dt_model_data) > 0 else None
                error_model_data = error_frame_data.get(model_name=model_name)
                error_datum = error_model_data[0] if len(
                    error_model_data) > 0 else None

                result = img.copy()
                if draw_inference or draw_settings is not None:
                    if draw_settings is not None:
                        if dt_model_datum is not None:
                            result = dt_model_datum.draw(
                                result, settings=draw_settings)
                    else:
                        if dt_model_datum is not None:
                            result = dt_model_datum.draw(result)

                if test_aliases is not None and gt_datum.test_name in test_aliases:
                    test_text = test_aliases[gt_datum.test_name]
                else:
                    test_text = gt_datum.test_name if gt_datum is not None else None
                if model_aliases is not None and dt_model_datum is not None and dt_model_datum.model_name in model_aliases:
                    model_text = model_aliases[dt_model_datum.model_name]
                else:
                    model_text = dt_model_datum.model_name if dt_model_datum is not None else None

                if details_func is not None:
                    for key in ['gt', 'dt', 'error']:
                        assert key in details_func.__annotations__, f'{details_func.__name__} must have a {key} parameter.'
                    details_func_params = {
                        'img': result,
                        'gt': gt_datum,
                        'dt': dt_model_datum,
                        'error': error_datum
                    }
                    suggested_params = {
                        'test_text': test_text,
                        'model_text': model_text,
                        'frame_text': gt_datum.frame
                    }
                    for key, val in suggested_params.items():
                        if key in details_func.__annotations__:
                            details_func_params[key] = val
                    result = details_func(**details_func_params)
                else:
                    row_text_list = [
                        f'Test: {test_text}', f'Model: {model_text}',
                        f'Frame: {gt_datum.frame}'
                    ]
                    result_h, result_w = result.shape[:2]
                    combined_row_height = len(row_text_list) * 0.04 * result_h
                    result = draw_text_rows_at_point(
                        img=result,
                        row_text_list=row_text_list,
                        x=result_w * 0.01,
                        y=result_h * 0.01,
                        combined_row_height=combined_row_height)
                img_buffer.append(result)
            collage_img = collage_from_img_buffer(img_buffer=img_buffer,
                                                  collage_shape=collage_shape)
            stream_writer.step(img=collage_img, file_name=file_name)
            if pbar is not None:
                pbar.update()
    if pbar is not None:
        pbar.close()
Example #13
from random import choice
from tqdm import tqdm
from annotation_utils.coco.structs import COCO_Dataset
from common_utils.file_utils import make_dir_if_not_exists
# import imgaug.augmenters as iaa

# PATH='/home/jitesh/3d/data/coco_data/mp_200_23_04_2020_15_37_00_coco-data'
# path = '/home/jitesh/3d/data/coco_data/sample_measure_coco_data'
# path = '/home/jitesh/3d/data/coco_data/measure_combined7'
# dest_folder_img_combined = f'{path}/img'
# dest_json_file_combined = f'{path}/json/measure-only.json'
path = '/home/pasonatech/labelme/ndds2coco/6_22/bolt_mark/type4'
dest_folder_img_combined = f'{path}'
dest_json_file_combined = f'{path}/HSR-coco.json'
dataset = COCO_Dataset.load_from_path(json_path=dest_json_file_combined,
                                      img_dir=dest_folder_img_combined)
output = f'{path}/aug_vis'
make_dir_if_not_exists(output)
iaa = aug
# resize_save_path = 'test_resize.json'
handler_save_path = 'test_handler.json'
# if not file_exists(resize_save_path):
#     resize = aug.Resize(width=500, height=500)
#     resize.save_to_path(save_path=resize_save_path, overwrite=True)
#     logger.info(f'Created new Resize save.')
# else:
#     resize = aug.Resize.load_from_path(resize_save_path)
#     logger.info(f'Loaded Resize from save.')
# if not file_exists(handler_save_path):
#     handler = AugHandler(
#         [
#             # aug.Affine(scale = {"x": tuple([0.8, 1.2]), "y":tuple([0.8, 1.2])}, translate_percent= {"x": tuple([0.1, 0.11]), "y":tuple([0.1, 0.11])}, rotate= [-180, 180], order= [0, 0], cval= [0, 0], shear= [0,0]),
#             # aug.Crop(percent=[0.2, 0.5]),
Example #14
    def move(self,
             dst_dataroot: str,
             include_depth: bool = True,
             include_RT: bool = False,
             camera_path: str = None,
             fps_path: str = None,
             preserve_filename: bool = False,
             use_softlink: bool = False,
             ask_permission_on_delete: bool = True,
             show_pbar: bool = True):
        make_dir_if_not_exists(dst_dataroot)
        delete_all_files_in_dir(dst_dataroot,
                                ask_permission=ask_permission_on_delete,
                                verbose=False)
        processed_image_id_list = []
        pbar = tqdm(total=len(self.annotations),
                    unit='annotation(s)',
                    leave=True) if show_pbar else None
        if pbar is not None:
            pbar.set_description('Moving Linemod Dataset Data')
        for linemod_ann in self.annotations:
            if not dir_exists(linemod_ann.data_root):
                raise FileNotFoundError(
                    f"Couldn't find data_root at {linemod_ann.data_root}")

            # Images
            linemod_image = self.images.get(id=linemod_ann.image_id)[0]
            if linemod_image.id not in processed_image_id_list:
                img_path = f'{linemod_ann.data_root}/{get_filename(linemod_image.file_name)}'
                if not file_exists(img_path):
                    raise FileNotFoundError(
                        f"Couldn't find image at {img_path}")
                if preserve_filename:
                    dst_img_path = f'{dst_dataroot}/{get_filename(linemod_image.file_name)}'
                    if file_exists(dst_img_path):
                        raise FileExistsError(f"""
                            Image already exists at {dst_img_path}
                            Hint: Use preserve_filename=False to bypass this error.
                            """)
                else:
                    dst_filename = f'{linemod_image.id}.{get_extension_from_filename(linemod_image.file_name)}'
                    linemod_image.file_name = dst_filename
                    dst_img_path = f'{dst_dataroot}/{dst_filename}'
                if not use_softlink:
                    copy_file(src_path=img_path,
                              dest_path=dst_img_path,
                              silent=True)
                else:
                    create_softlink(src_path=rel_to_abs_path(img_path),
                                    dst_path=rel_to_abs_path(dst_img_path))
                processed_image_id_list.append(linemod_image.id)

            # Masks
            if not file_exists(linemod_ann.mask_path):
                raise FileNotFoundError(
                    f"Couldn't find mask at {linemod_ann.mask_path}")
            mask_path = linemod_ann.mask_path
            if preserve_filename:
                dst_mask_path = f'{dst_dataroot}/{get_filename(linemod_ann.mask_path)}'
                if file_exists(dst_mask_path):
                    raise FileExistsError(f"""
                        Mask already exists at {dst_mask_path}
                        Hint: Use preserve_filename=False to bypass this error.
                        """)
            else:
                mask_filename = get_filename(linemod_ann.mask_path)
                dst_filename = f'{linemod_ann.id}_mask.{get_extension_from_filename(mask_filename)}'
                dst_mask_path = f'{dst_dataroot}/{dst_filename}'
                linemod_ann.mask_path = dst_mask_path
            if not use_softlink:
                copy_file(src_path=mask_path,
                          dest_path=dst_mask_path,
                          silent=True)
            else:
                create_softlink(src_path=rel_to_abs_path(mask_path),
                                dst_path=rel_to_abs_path(dst_mask_path))

            # Depth
            if include_depth and linemod_ann.depth_path is not None:
                if not file_exists(linemod_ann.depth_path):
                    raise FileNotFoundError(
                        f"Couldn't find depth at {linemod_ann.depth_path}")
                depth_path = linemod_ann.depth_path
                if preserve_filename:
                    dst_depth_path = f'{dst_dataroot}/{get_filename(linemod_ann.depth_path)}'
                    if file_exists(dst_depth_path):
                        raise FileExistsError(f"""
                            Depth already exists at {dst_depth_path}
                            Hint: Use preserve_filename=False to bypass this error.
                            """)
                else:
                    depth_filename = get_filename(linemod_ann.depth_path)
                    dst_filename = f'{linemod_ann.id}_depth.{get_extension_from_filename(depth_filename)}'
                    dst_depth_path = f'{dst_dataroot}/{dst_filename}'
                    linemod_ann.depth_path = dst_depth_path
                if not use_softlink:
                    copy_file(src_path=depth_path,
                              dest_path=dst_depth_path,
                              silent=True)
                else:
                    create_softlink(src_path=rel_to_abs_path(depth_path),
                                    dst_path=rel_to_abs_path(dst_depth_path))

            # RT pickle files
            if include_RT:
                rootname = get_rootname_from_path(mask_path)
                if rootname.endswith('_mask'):
                    rootname = rootname.replace('_mask', '')
                rt_filename = f'{rootname}_RT.pkl'
                rt_path = f'{linemod_ann.data_root}/{rt_filename}'
                if not file_exists(rt_path):
                    raise FileNotFoundError(
                        f"Couldn't find RT pickle file at {rt_path}")
                if preserve_filename:
                    dst_rt_path = f'{dst_dataroot}/{rt_filename}'
                    if file_exists(dst_rt_path):
                        raise FileExistsError(f"""
                            RT pickle file already exists at {dst_rt_path}
                            Hint: Use preserve_filename=False to bypass this error.
                            """)
                else:
                    dst_rt_filename = f'{linemod_ann.id}_RT.pkl'
                    dst_rt_path = f'{dst_dataroot}/{dst_rt_filename}'
                if not use_softlink:
                    copy_file(src_path=rt_path,
                              dest_path=dst_rt_path,
                              silent=True)
                else:
                    create_softlink(src_path=rel_to_abs_path(rt_path),
                                    dst_path=rel_to_abs_path(dst_rt_path))
            if pbar is not None:
                pbar.update()
        # Camera setting
        if camera_path is not None:
            if not file_exists(camera_path):
                raise FileNotFoundError(
                    f"Couldn't find camera settings at {camera_path}")
            dst_camera_path = f'{dst_dataroot}/{get_filename(camera_path)}'
            if file_exists(dst_camera_path):
                raise FileExistsError(
                    f'Camera settings already saved at {dst_camera_path}')
            if not use_softlink:
                copy_file(src_path=camera_path,
                          dest_path=dst_camera_path,
                          silent=True)
            else:
                create_softlink(src_path=rel_to_abs_path(camera_path),
                                dst_path=rel_to_abs_path(dst_camera_path))

        # FPS setting
        if fps_path is not None:
            if not file_exists(fps_path):
                raise FileNotFoundError(
                    f"Couldn't find FPS settings at {fps_path}")
            dst_fps_path = f'{dst_dataroot}/{get_filename(fps_path)}'
            if file_exists(dst_fps_path):
                raise FileExistsError(
                    f'FPS settings already saved at {dst_fps_path}')
            if not use_softlink:
                copy_file(src_path=fps_path,
                          dest_path=dst_fps_path,
                          silent=True)
            else:
                create_softlink(src_path=rel_to_abs_path(fps_path),
                                dst_path=rel_to_abs_path(dst_fps_path))
        if pbar is not None:
            pbar.close()
Example #15
    delimiter='_',
    ignore_unspecified_categories=True,
    show_pbar=True,
    bbox_area_threshold=1,
    default_visibility_threshold=0.10,
    visibility_threshold_dict={'measure': 0.01},
    allow_unfound_seg=True,
    class_merge_map={
        'mark_10th_place': 'seg_measure',
        'marking_bottom': 'seg_measure',
        'marking_top': 'seg_measure',
        'hook': 'seg_measure'
    })

# Output Directories
make_dir_if_not_exists('measure_coco')
delete_all_files_in_dir('measure_coco')

measure_dir = 'measure_coco/measure'
whole_number_dir = 'measure_coco/whole_number'
digit_dir = 'measure_coco/digit'
json_output_filename = 'output.json'

measure_dataset, whole_number_dataset, digit_dataset = dataset.split_measure_dataset(
    measure_dir=measure_dir,
    whole_number_dir=whole_number_dir,
    digit_dir=digit_dir,
    allow_no_measures=True,
    allow_missing_parts=True)

if False:  # Change to True if you want to remove all segmentation from the measure dataset.
Example #16
    img_dir=
    '/home/clayton/workspace/prj/data_keep/data/toyota/from_toyota/20201017/20201017_robot_camera/combined'
)
linemod_ann_sample = linemod_dataset.annotations[0]
kpt_3d = linemod_ann_sample.fps_3d.copy()
kpt_3d.append(linemod_ann_sample.center_3d)
corner_3d = linemod_ann_sample.corner_3d
K = linemod_ann_sample.K
linemod_image_sample = linemod_dataset.images[0]
dsize = (linemod_image_sample.width, linemod_image_sample.height)

weights_dir = '/home/clayton/workspace/git/clean-pvnet/data/model/pvnet/custom'
weight_path_list = get_all_files_of_extension(weights_dir, 'pth')
weight_path_list.sort()
infer_data_dump_dir = '/home/clayton/workspace/prj/data_keep/data/toyota/from_toyota/20201017/20201017_robot_camera/infer_dump'
make_dir_if_not_exists(infer_data_dump_dir)
# delete_all_files_in_dir(infer_data_dump_dir, ask_permission=True)
weights_pbar = tqdm(total=len(weight_path_list), unit='weight(s)')
for weight_path in weight_path_list:
    rootname = get_rootname_from_path(weight_path)
    weights_pbar.set_description(rootname)
    pred_dump_path = f'{infer_data_dump_dir}/{rootname}.json'
    if file_exists(pred_dump_path):
        weights_pbar.update()
        continue
    inferer = PVNetInferer(weight_path=weight_path)
    inferer.infer_coco_dataset(dataset=coco_dataset,
                               kpt_3d=kpt_3d,
                               corner_3d=corner_3d,
                               K=K,
                               blackout=True,
Example #17
def prepare_datasets_from_excel(
        xlsx_path: str,
        dst_root_dir: str,
        usecols: str = 'A:L',
        skiprows: int = None,
        skipfooter: int = 0,
        skip_existing: bool = False,
        val_target_proportion: float = 0.05,
        min_val_size: int = None,
        max_val_size: int = None,
        orig_config_save: str = 'orig.yaml',
        reorganized_config_save: str = 'dataset_config.yaml',
        show_pbar: bool = True):
    """
    Parameters:
        xlsx_path - Path to excel sheet that contains all of the information about where your datasets are located.
        dst_root_dir - Path to where you would like to save your prepared scenario datasets (split into train and val)
        usecols - Specify which columns you would like to parse from the excel sheet at xlsx_path. [Default: 'A:L']
        skiprows - Specify the number of rows from the top that you would like to skip when parsing the excel sheet. [Default: None]
        skipfooter - Specify the number of rows from the bottom that you would like to skip when parsing the excel sheet. [Default: 0]
        skip_existing - If you terminated dataset preparation midway, you can skip the scenarios that were already made using skip_existing=True. [Default: False]
        val_target_proportion - The proportion of your scenario that you would like to allocate to validation. [Default: 0.05]
        min_val_size - The minimum number of images that you would like to use for validation. [Default: None]
        max_val_size - The maximum number of images that you would like to use for validation. [Default: None]
        orig_config_save - Where you would like to save the dataset configuration representing the datasets defined in your excel sheet. [Default: 'orig.yaml']
        reorganized_config_save - Where you would like to save the dataset configuration representing your dst_root_dir. [Default: 'dataset_config.yaml']
        show_pbar - Whether or not you would like to show a progress bar during preparation. [Default: True]
    
    Description:
        The datasets specified in the excel sheet at xlsx_path will be combined and then split into a train + validation folder.
        Since the absolute paths of both image directories and annotation paths are parsed from the excel sheet, there is no need to place any restrictions
        on where each dataset needs to be located.

        The destination root directory will have the following structure:
            dst_root_dir
                scenario0
                    train
                    val
                scenario1
                    train
                    val
                scenario2
                    train
                    val
                ...

        The dataset configuration file saved at reorganized_config_save will reflect the directory structure of dst_root_dir.
        The configuration file representing the directory structure defined in your excel sheet is saved under orig_config_save.

        Note that orig_config_save and reorganized_config_save do not have to be inside of dst_root_dir.
        In fact, it is recommended not to save them inside of dst_root_dir.
        It is also recommended that you change the paths of orig_config_save and reorganized_config_save every time you add to your datasets,
        because you will likely want to keep track of previous states of your dataset configuration and may also want to
        roll back to a previous configuration at any given time.
    """
    # Parse Excel Sheet
    if not file_exists(xlsx_path):
        raise FileNotFoundError(f'File not found: {xlsx_path}')
    data_df = pd.read_excel(xlsx_path,
                            usecols=usecols,
                            skiprows=skiprows,
                            skipfooter=skipfooter)
    data_records = data_df.to_dict(orient='records')

    required_keys = [
        'Scenario Name', 'Dataset Name', 'Image Directory', 'Annotation Path'
    ]
    parsed_keys = list(data_records[0].keys())
    missing_keys = []
    for required_key in required_keys:
        if required_key not in parsed_keys:
            missing_keys.append(required_key)
    if len(missing_keys) > 0:
        raise KeyError(f"""
            Couldn't find the following required keys in the given excel sheet:
            missing_keys: {missing_keys}
            required_keys: {required_keys}
            parsed_keys: {parsed_keys}
            xlsx_path: {xlsx_path}

            Please check your excel sheet and script parameters and try again.
            Note: usecols, skiprows, and skipfooter affect which parts of the excel sheet are parsed.
            """)

    def is_empty_cell(info_dict: Dict[str, str],
                      key: str,
                      expected_type: type = str) -> bool:
        return not isinstance(info_dict[key], expected_type) and math.isnan(
            info_dict[key])

    collection_handler = DatasetConfigCollectionHandler()
    current_scenario_name = None
    working_config_list = cast(List[DatasetConfig], [])
    pbar = tqdm(total=len(data_records), unit='item(s)') if show_pbar else None
    if pbar is not None:
        pbar.set_description('Parsing Excel Sheet')
    for info_dict in data_records:
        for required_cell_key in [
                'Dataset Name', 'Image Directory', 'Annotation Path'
        ]:
            if is_empty_cell(info_dict,
                             key=required_cell_key,
                             expected_type=str):
                raise ValueError(f"""
                    Encountered empty cell under {required_cell_key}.
                    Row Dictionary: {info_dict}
                    xlsx_path: {xlsx_path}
                    Please check your excel sheet.
                    """)
        assert 'Scenario Name' in info_dict
        scenario_name = info_dict['Scenario Name'] \
            if 'Scenario Name' in info_dict and not is_empty_cell(info_dict, key='Scenario Name', expected_type=str) \
            else None
        dataset_name = info_dict['Dataset Name']
        img_dir = info_dict['Image Directory']
        ann_path = info_dict['Annotation Path']
        if scenario_name is not None:
            if len(working_config_list) > 0:
                collection = DatasetConfigCollection(working_config_list,
                                                     tag=current_scenario_name)
                collection_handler.append(collection)
                working_config_list = []
            current_scenario_name = scenario_name
        config = DatasetConfig(img_dir=img_dir,
                               ann_path=ann_path,
                               ann_format='coco',
                               tag=dataset_name)
        working_config_list.append(config)
        if pbar is not None:
            pbar.update()
    if len(working_config_list) > 0:
        collection = DatasetConfigCollection(working_config_list,
                                             tag=current_scenario_name)
        collection_handler.append(collection)
        working_config_list = []
    if pbar is not None:
        pbar.close()
    collection_handler.save_to_path(orig_config_save, overwrite=True)

    # Combine Datasets
    train_collection = DatasetConfigCollection(tag='train')
    val_collection = DatasetConfigCollection(tag='val')

    make_dir_if_not_exists(dst_root_dir)
    pbar = tqdm(total=len(collection_handler),
                unit='scenario(s)') if show_pbar else None
    if pbar is not None:
        pbar.set_description('Combining Scenarios')
    for collection in collection_handler:
        scenario_root_dir = f'{dst_root_dir}/{collection.tag}'
        make_dir_if_not_exists(scenario_root_dir)
        scenario_train_dir = f'{scenario_root_dir}/train'
        make_dir_if_not_exists(scenario_train_dir)
        scenario_val_dir = f'{scenario_root_dir}/val'
        make_dir_if_not_exists(scenario_val_dir)

        if (not file_exists(f'{scenario_train_dir}/output.json')
                or not file_exists(f'{scenario_val_dir}/output.json')
            ) or not skip_existing:
            combined_dataset = COCO_Dataset.combine_from_config(
                collection, img_sort_attr_name='file_name', show_pbar=False)
            orig_num_images = len(combined_dataset.images)
            assert orig_num_images >= 2, f'{collection.tag} has only {orig_num_images} images, and thus cannot be split into train and val.'
            num_val = int(len(combined_dataset.images) * val_target_proportion)
            num_val = 1 if num_val == 0 else num_val
            num_val = min_val_size if min_val_size is not None and num_val < min_val_size else num_val
            num_val = max_val_size if max_val_size is not None and num_val > max_val_size else num_val
            num_train = orig_num_images - num_val
            train_dataset, val_dataset = combined_dataset.split_into_parts(
                ratio=[num_train, num_val], shuffle=True)

            train_dataset.move_images(dst_img_dir=scenario_train_dir,
                                      preserve_filenames=False,
                                      update_img_paths=True,
                                      overwrite=True,
                                      show_pbar=False)
            train_dataset.save_to_path(f'{scenario_train_dir}/output.json',
                                       overwrite=True)
            train_collection.append(
                DatasetConfig(img_dir=scenario_train_dir,
                              ann_path=f'{scenario_train_dir}/output.json',
                              tag=f'{collection.tag}_train'))

            val_dataset.move_images(dst_img_dir=scenario_val_dir,
                                    preserve_filenames=False,
                                    update_img_paths=True,
                                    overwrite=True,
                                    show_pbar=False)
            val_dataset.save_to_path(f'{scenario_val_dir}/output.json',
                                     overwrite=True)
            val_collection.append(
                DatasetConfig(img_dir=scenario_val_dir,
                              ann_path=f'{scenario_val_dir}/output.json',
                              tag=f'{collection.tag}_val'))
        else:
            train_dataset = COCO_Dataset.load_from_path(
                f'{scenario_train_dir}/output.json',
                img_dir=f'{scenario_train_dir}')
            train_collection.append(
                DatasetConfig(img_dir=scenario_train_dir,
                              ann_path=f'{scenario_train_dir}/output.json',
                              tag=f'{collection.tag}_train'))
            val_dataset = COCO_Dataset.load_from_path(
                f'{scenario_val_dir}/output.json',
                img_dir=f'{scenario_val_dir}')
            val_collection.append(
                DatasetConfig(img_dir=scenario_val_dir,
                              ann_path=f'{scenario_val_dir}/output.json',
                              tag=f'{collection.tag}_val'))
        if pbar is not None:
            pbar.update()
    if pbar is not None:
        pbar.close()

    organized_collection_handler = DatasetConfigCollectionHandler(
        [train_collection, val_collection])
    organized_collection_handler.save_to_path(reorganized_config_save,
                                              overwrite=True)
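
A usage sketch of prepare_datasets_from_excel based on the docstring above; the spreadsheet path, destination directory, and validation size limits are placeholder assumptions:

# Hypothetical call; adjust the paths and parsing options to match your own excel sheet.
prepare_datasets_from_excel(
    xlsx_path='dataset_list.xlsx',        # sheet with Scenario Name, Dataset Name, Image Directory, Annotation Path columns
    dst_root_dir='scenarios_prepared',    # each scenario gets a train/val folder under here
    usecols='A:L',
    val_target_proportion=0.05,
    min_val_size=5,
    max_val_size=100,
    orig_config_save='orig.yaml',
    reorganized_config_save='dataset_config.yaml',
    show_pbar=True)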
Example #18
def prepare_datasets_from_dir(
        scenario_root_dir: str,
        dst_root_dir: str,
        annotation_filename: str = 'output.json',
        skip_existing: bool = False,
        val_target_proportion: float = 0.05,
        min_val_size: int = None,
        max_val_size: int = None,
        orig_config_save: str = 'orig.yaml',
        reorganized_config_save: str = 'dataset_config.yaml',
        show_pbar: bool = True):
    """
    Parameters:
        scenario_root_dir - Path to the source root directory containing all of your scenario folders. [Required]
        dst_root_dir - Path to where you would like to save your prepared scenario datasets (split into train and val) [Required]
        annotation_filename - The filename of every annotation file under scenario_root_dir [Default: 'output.json']
        skip_existing - If you terminated dataset preparation midway, you can skip the scenarios that were already made using skip_existing=True. [Default: False]
        val_target_proportion - The proportion of your scenario that you would like to allocate to validation. [Default: 0.05]
        min_val_size - The minimum number of images that you would like to use for validation. [Default: None]
        max_val_size - The maximum number of images that you would like to use for validation. [Default: None]
        orig_config_save - Where you would like to save the dataset configuration representing your scenario_root_dir. [Default: 'orig.yaml']
        reorganized_config_save - Where you would like to save the dataset configuration representing your dst_root_dir. [Default: 'dataset_config.yaml']
        show_pbar - Whether or not you would like to show a progress bar during preparation. [Default: True]

    Description:
        The datasets under each scenario directory will be combined and then split into a train + validation folder.
        The source root directory should have the following structure:
            scenario_root_dir
                scenario0
                    scenario0_part0
                    scenario0_part1
                    scenario0_part2
                    ...
                scenario1
                    scenario1_part0
                    scenario1_part1
                    scenario1_part2
                    ...
                scenario2
                    scenario2_part0
                    scenario2_part1
                    scenario2_part2
                    ...
                ...
        Note that there is no restriction on directory names, so the directory names can be anything.
        This method reads a fixed directory structure regardless of the directory names.
        Also note that it is necessary for the coco annotation file saved in each scenario part directory to have the same filename.
        If you need a more flexible approach for preparing your datasets, please define where your datasets are located in an excel sheet
        and use prepare_datasets_from_excel instead.

        The destination root directory will have the following structure:
            dst_root_dir
                scenario0
                    train
                    val
                scenario1
                    train
                    val
                scenario2
                    train
                    val
                ...
        
        The dataset configuration file saved at reorganized_config_save will reflect the directory structure of dst_root_dir.
        The configuration file representing the directory structure of the scenario_root_dir is saved under orig_config_save.

        Note that orig_config_save and reorganized_config_save do not have to be inside of dst_root_dir.
        In fact, it is recommended not to save them inside of dst_root_dir.
        It is also recommended that you change the paths of orig_config_save and reorganized_config_save every time you add to your datasets,
        because you will likely want to keep track of previous states of your dataset configuration and may also want to
        roll back to a previous configuration at any given time.
    """
    make_dir_if_not_exists(dst_root_dir)
    if get_dir_contents_len(dst_root_dir) > 0 and not skip_existing:
        print(
            f'Directory {dst_root_dir} is not empty.\nAre you sure you want to delete the contents?'
        )
        answer = input('yes/no')
        if answer.lower() in ['yes', 'y']:
            delete_all_files_in_dir(dst_root_dir)
        elif answer.lower() in ['no', 'n']:
            print(f'Terminating program.')
            sys.exit()
        else:
            raise ValueError(f'Invalid answer: {answer}')

    # Gather datasets from source root directory and combine.
    scenario_names = get_dirnames_in_dir(scenario_root_dir)
    scenario_datasets = cast(List[COCO_Dataset], [])
    orig_collection_handler = DatasetConfigCollectionHandler()
    pbar = tqdm(total=len(scenario_names),
                unit='scenario(s)') if show_pbar else None
    if pbar is not None:
        pbar.set_description('Gathering Scenarios')
    for scenario_name in scenario_names:
        orig_scenario_collection = DatasetConfigCollection(tag=scenario_name)
        src_scenario_dir = f'{scenario_root_dir}/{scenario_name}'
        part_names = get_dirnames_in_dir(src_scenario_dir)
        part_datasets = cast(List[COCO_Dataset], [])
        part_dataset_dirs = cast(List[str], [])
        for part_name in part_names:
            src_part_dir = f'{src_scenario_dir}/{part_name}'
            src_part_ann_path = f'{src_part_dir}/{annotation_filename}'
            part_dataset = COCO_Dataset.load_from_path(
                json_path=src_part_ann_path, img_dir=src_part_dir)
            part_datasets.append(part_dataset)
            part_dataset_dirs.append(src_part_dir)
            orig_scenario_part_config = DatasetConfig(
                img_dir=src_part_dir,
                ann_path=src_part_ann_path,
                ann_format='coco',
                tag=part_name)
            orig_scenario_collection.append(orig_scenario_part_config)
        scenario_dataset = COCO_Dataset.combine(dataset_list=part_datasets,
                                                img_dir_list=part_dataset_dirs,
                                                show_pbar=False)
        scenario_datasets.append(scenario_dataset)
        orig_collection_handler.append(orig_scenario_collection)
        if pbar is not None:
            pbar.update()
    orig_collection_handler.save_to_path(orig_config_save, overwrite=True)
    if pbar is not None:
        pbar.close()

    # Split combined scenario datasets into train and val and save them.
    train_collection = DatasetConfigCollection(tag='train')
    val_collection = DatasetConfigCollection(tag='val')
    pbar = tqdm(total=len(scenario_names)) if show_pbar else None
    if pbar is not None:
        pbar.set_description('Splitting Scenarios Into Train/Val')
    for i in range(len(scenario_names)):
        dst_scenario_dir = f'{dst_root_dir}/{scenario_names[i]}'
        if dir_exists(dst_scenario_dir):
            if skip_existing:
                if pbar is not None:
                    pbar.update()
                continue
            else:
                raise FileExistsError(
                    f'Directory already exists: {dst_scenario_dir}')
        else:
            make_dir_if_not_exists(dst_scenario_dir)
        orig_num_images = len(scenario_datasets[i].images)
        assert orig_num_images >= 2, f'{scenario_names[i]} has only {orig_num_images} images, and thus cannot be split into train and val.'
        # Size the val split from val_target_proportion, then clamp it:
        # at least 1 image, and within [min_val_size, max_val_size] when given.
        num_val = int(orig_num_images * val_target_proportion)
        num_val = max(num_val, 1)
        if min_val_size is not None:
            num_val = max(num_val, min_val_size)
        if max_val_size is not None:
            num_val = min(num_val, max_val_size)
        num_train = orig_num_images - num_val
        train_dataset, val_dataset = scenario_datasets[i].split_into_parts(
            ratio=[num_train, num_val], shuffle=True)

        dst_train_dir = f'{dst_scenario_dir}/train'
        make_dir_if_not_exists(dst_train_dir)
        train_dataset.move_images(dst_img_dir=dst_train_dir,
                                  preserve_filenames=False,
                                  update_img_paths=True,
                                  show_pbar=False)
        train_ann_path = f'{dst_train_dir}/output.json'
        train_dataset.save_to_path(train_ann_path, overwrite=True)
        train_dataset_config = DatasetConfig(img_dir=dst_train_dir,
                                             ann_path=train_ann_path,
                                             ann_format='coco',
                                             tag=f'{scenario_names[i]}_train')
        train_collection.append(train_dataset_config)

        dst_val_dir = f'{dst_scenario_dir}/val'
        make_dir_if_not_exists(dst_val_dir)
        val_dataset.move_images(dst_img_dir=dst_val_dir,
                                preserve_filenames=False,
                                update_img_paths=True,
                                show_pbar=False)
        val_ann_path = f'{dst_val_dir}/output.json'
        val_dataset.save_to_path(val_ann_path, overwrite=True)
        val_dataset_config = DatasetConfig(img_dir=dst_val_dir,
                                           ann_path=val_ann_path,
                                           ann_format='coco',
                                           tag=f'{scenario_names[i]}_val')
        val_collection.append(val_dataset_config)
        if pbar is not None:
            pbar.update()
    if pbar is not None:
        pbar.close()
    collection_handler = DatasetConfigCollectionHandler(
        [train_collection, val_collection])
    collection_handler.save_to_path(reorganized_config_save, overwrite=True)
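
# A hedged usage sketch for the preparation function defined above. The function
# name below is a placeholder (the real name appears with the signature earlier in
# this file); the keyword arguments are the parameters documented in the docstring
# and used in the body, with example values only.
#
# prepare_scenario_datasets(  # hypothetical name
#     scenario_root_dir='/path/to/scenario_root_dir',
#     dst_root_dir='/path/to/dst_root_dir',
#     annotation_filename='output.json',
#     val_target_proportion=0.05,
#     min_val_size=1,
#     max_val_size=20,
#     orig_config_save='orig.yaml',
#     reorganized_config_save='dataset_config.yaml',
#     skip_existing=False,
#     show_pbar=True,
# )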
Ejemplo n.º 19
0
#             gt_kpt_3d=kpt_3d,
#             corner_3d=corner_3d,
#             K=K,
#             distortion=np.array([0.001647, -0.105636, -0.002094, -0.006446, 0.000000]),
#             units_per_meter=1.0
#         )
#     pbar.update()
# pbar.close()
# frame_result_list.save_to_path(f'{infer_data_dump}/{model_name}_infer0.json', overwrite=True)

infer_data_dump_dir = 'infer_data_dump0'
infer_data_dump_paths = get_all_files_of_extension(infer_data_dump_dir,
                                                   extension='json')
infer_data_dump_paths.sort()
fixed_infer_data_dump_dir = 'infer_data_dump'
make_dir_if_not_exists(fixed_infer_data_dump_dir)
pbar = tqdm(total=len(infer_data_dump_paths), unit='dump(s)', leave=True)
pbar.set_description('Fixing Inference Dumps')
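# Recalculate each cached prediction with the ground-truth 3D keypoints (kpt_3d),
# 3D corners (corner_3d), camera matrix K, and distortion coefficients, all of
# which are assumed to be defined earlier in this script (as in the commented-out
# block above), then re-save the fixed dumps under fixed_infer_data_dump_dir.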
for infer_data_dump_path in infer_data_dump_paths:
    results = PVNetFrameResultList.load_from_path(infer_data_dump_path)
    for result in results:
        for pred in result.pred_list:
            pred.recalculate(gt_kpt_3d=kpt_3d,
                             corner_3d=corner_3d,
                             K=K,
                             distortion=distortion,
                             units_per_meter=1.0)
    results.save_to_path(
        f'{fixed_infer_data_dump_dir}/{get_filename(infer_data_dump_path)}',
        overwrite=True)
    pbar.update()
Ejemplo n.º 20
0
 def _prep_output_dir(self, measure_dir: str, whole_number_dir: str,
                      digit_dir: str):
     for output_dir in [measure_dir, whole_number_dir, digit_dir]:
         make_dir_if_not_exists(output_dir)
         delete_all_files_in_dir(output_dir, ask_permission=False)
Ejemplo n.º 21
0
from logger import logger
from annotation_utils.ndds.structs import NDDS_Dataset
from annotation_utils.coco.structs import COCO_Dataset, COCO_Category_Handler
from common_utils.file_utils import make_dir_if_not_exists, delete_all_files_in_dir

src_dir = '/home/clayton/workspace/prj/data_keep/data/ndds/20200924_yagura5_bright_1-colored'
dst_dir = 'nihonbashi_debug'
make_dir_if_not_exists(dst_dir)
delete_all_files_in_dir(dst_dir, ask_permission=False)

hsr_categories = COCO_Category_Handler.load_from_path(
    '/home/clayton/workspace/prj/data_keep/data/toyota/dataset/config/categories/hsr_categories.json'
)

# Load NDDS Dataset
ndds_dataset = NDDS_Dataset.load_from_dir(json_dir=src_dir, show_pbar=True)

# Fix NDDS Dataset naming so that it follows convention. (This is not necessary if the NDDS dataset already follows the naming convention.)
for frame in ndds_dataset.frames:
    # Fix Naming Convention
    for ann_obj in frame.ndds_ann.objects:
        if ann_obj.class_name.lower() == 'nihonbashi':
            obj_type, obj_name = 'seg', 'hsr'
            instance_name = '0'
            ann_obj.class_name = f'{obj_type}_{obj_name}_{instance_name}'
        elif ann_obj.class_name.lower() in list('abcdefghijkl'):
            obj_type, obj_name = 'kpt', 'hsr'
            instance_name, contained_name = '0', ann_obj.class_name
            ann_obj.class_name = f'{obj_type}_{obj_name}_{instance_name}_{contained_name}'
        else:
            logger.error(f'Unknown ann_obj.class_name: {ann_obj.class_name}')
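
# After this renaming pass, every class name follows the convention built above:
# '{obj_type}_{obj_name}_{instance_name}' for segmentation objects and
# '{obj_type}_{obj_name}_{instance_name}_{contained_name}' for keypoint objects,
# e.g. 'nihonbashi' -> 'seg_hsr_0' and 'b' -> 'kpt_hsr_0_b'.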
Ejemplo n.º 22
0
import cv2
from annotation_utils.ndds.structs import NDDS_Dataset
from common_utils.file_utils import make_dir_if_not_exists, delete_all_files_in_dir
from common_utils.cv_drawing_utils import draw_bbox
from streamer.cv_viewer import cv_simple_image_viewer

# Load NDDS Dataset
ndds_dataset = NDDS_Dataset.load_from_dir(
    json_dir='/home/clayton/workspace/prj/data_keep/data/ndds/m1_200',
    show_pbar=True)

vis_dump_dir = 'vis_dump'
make_dir_if_not_exists(vis_dump_dir)
delete_all_files_in_dir(vis_dump_dir)

for frame in ndds_dataset.frames:
    img = cv2.imread(frame.img_path)
    for ndds_obj in frame.ndds_ann.objects:
        img = draw_bbox(img=img, bbox=ndds_obj.bounding_box)
    quit_flag = cv_simple_image_viewer(img=img, preview_width=1000)
    if quit_flag:
        break
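
# vis_dump_dir is created and cleared above, but the loop only previews the frames.
# A minimal sketch for also writing the drawn frames into vis_dump_dir
# (using os.path.basename so no extra helper function is assumed):
import os

for frame in ndds_dataset.frames:
    img = cv2.imread(frame.img_path)
    for ndds_obj in frame.ndds_ann.objects:
        img = draw_bbox(img=img, bbox=ndds_obj.bounding_box)
    cv2.imwrite(f'{vis_dump_dir}/{os.path.basename(frame.img_path)}', img)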
Ejemplo n.º 23
0
# coco_ann_path = os.path.join(path, "json/bbox_resized.json")
# model = "COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml"mask_rcnn_R_101_FPN_3x
model_name = 'mask_rcnn_R_50_FPN_1x'
# model_name = 'mask_rcnn_R_101_FPN_3x'
# model = f"COCO-InstanceSegmentation/{model_name}.yaml"
# model_name = 'keypoint_rcnn_R_50_FPN_1x'
# model_name = 'keypoint_rcnn_R_101_FPN_3x'
model = f"COCO-Keypoints/{model_name}.yaml"
# model = "COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml"
# model = "COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml"

_model_name = model.split('/')[0].split('-')[1] + '_'\
            + model.split('/')[1].split('_')[2] + '_'\
            + model.split('/')[1].split('_')[3] + '_'\
            + model.split('/')[1].split('_')[5].split('.')[0]
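# For example, with model = "COCO-Keypoints/keypoint_rcnn_R_50_FPN_1x.yaml",
# _model_name evaluates to 'Keypoints_R_50_1x'.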
make_dir_if_not_exists(f'{path}/weights')
output_dir_path = f'{path}/weights/{_model_name}_aug_val_1'
# root_dir = '/home/doors/workspace/darwin/dataset_config/20200722_trainval'
# dataset_root_dir = f'{root_dir}/dataset_combined/all'
# root_dir=f'/home/jitesh/3d/data/coco_data/hook_sim_real_data6/output'
dataset_root_dir = path
# targets = get_dirnames_in_dir(dataset_root_dir)
# target_dirs = [f'{dataset_root_dir}/{target}' for target in targets]
# # target_train_dirs = [f'{target_dir}/train' for target_dir in target_dirs]
# target_train_dirs = [f'{target_dir}' for target_dir, target in zip(target_dirs, targets) if target.startswith('train')]
# target_val_dirs = [f'{target_dir}' for target_dir, target in zip(target_dirs, targets) if target.startswith('val')]
# # target_val_dirs = [f'{target_dir}/val' for target_dir in target_dirs]
# target_train_coco_ann_paths = [f'{target_train_dir}/coco/output.json' for target_train_dir in target_train_dirs]
# target_val_coco_ann_paths = [f'{target_val_dir}/coco/output.json' for target_val_dir in target_val_dirs]
# target_train_img_dirs = [f'{target_train_dir}/img' for target_train_dir in target_train_dirs]
# target_val_img_dirs = [f'{target_val_dir}/img' for target_val_dir in target_val_dirs]
Ejemplo n.º 24
0
from common_utils.file_utils import make_dir_if_not_exists
from annotation_utils.coco.structs import COCO_Dataset

src_img_dir = '/home/clayton/workspace/prj/data_keep/data/dataset/bird/img'
dst_dataset_root_dir = '/home/clayton/workspace/prj/data_keep/data/dataset/bird/dataset_root_dir'
make_dir_if_not_exists(dst_dataset_root_dir)

dataset = COCO_Dataset.load_from_path(  # 18 images -> 2 scenarios x 3 datasets / scenario x 3 images / dataset -> 2 train, 1 val
    json_path=f'{src_img_dir}/output.json',
    img_dir=src_img_dir)
scenario_names = [f'scenario{i}' for i in range(2)]
scenario_datasets = dataset.split_into_parts(ratio=[9, 9], shuffle=True)
for i in range(len(scenario_datasets)):
    scenario_name = f'scenario{i}'
    dst_scenario_dir = f'{dst_dataset_root_dir}/{scenario_name}'
    make_dir_if_not_exists(dst_scenario_dir)
    part_datasets = scenario_datasets[i].split_into_parts(ratio=[3, 3, 3],
                                                          shuffle=True)
    for j in range(len(part_datasets)):
        part_name = f'part{j}'
        dst_part_dir = f'{dst_scenario_dir}/{part_name}'
        make_dir_if_not_exists(dst_part_dir)
        part_datasets[j].move_images(dst_img_dir=dst_part_dir,
                                     preserve_filenames=True,
                                     update_img_paths=True,
                                     overwrite=True,
                                     show_pbar=False)
        part_datasets[j].save_to_path(save_path=f'{dst_part_dir}/output.json',
                                      overwrite=True)
        part_datasets[j].save_video(save_path=f'{dst_part_dir}/preview.avi',
                                    fps=5,