Example #1
 def init_save_dir(self):
     make_dir_if_not_exists(self._save_dir)
     if self._clear:
         delete_all_files_in_dir(self._save_dir, ask_permission=False)
     else:
         self._existing_extensions = list(set([get_extension_from_path(path) for path in get_valid_image_paths(self._save_dir)]))
     self._first = False
Example #2
    def save_to_dir(self,
                    json_save_dir: str,
                    src_img_dir: str,
                    dst_img_dir: str = None,
                    overwrite: bool = False,
                    show_pbar: bool = False):
        """Saves NDDS_Frame_Handler object to a directory path.

        Arguments:
            json_save_dir {str} -- [Path to directory where you want to save the NDDS annotation json files.]
            src_img_dir {str} -- [Path to directory where the original NDDS images are saved.]

        Keyword Arguments:
            dst_img_dir {str} -- [Path to directory where you want to copy the original NDDS images.] (default: {None})
            overwrite {bool} -- [Whether or not you would like to overwrite existing files/directories.] (default: {False})
            show_pbar {bool} -- [Whether or not you would like to show the progress bar.] (default: {False})
        """
        self._check_paths_valid(src_img_dir=src_img_dir)
        make_dir_if_not_exists(json_save_dir)
        delete_all_files_in_dir(json_save_dir, ask_permission=not overwrite)
        if dst_img_dir is not None:
            make_dir_if_not_exists(dst_img_dir)
            delete_all_files_in_dir(dst_img_dir, ask_permission=not overwrite)

        if show_pbar:
            pbar = tqdm(total=len(self), unit='ann(s)', leave=True)
            pbar.set_description(f'Saving {self.__class__.__name__}')
        for frame in self:
            save_path = f'{json_save_dir}/{get_rootname_from_path(frame.img_path)}.json'
            if dst_img_dir is not None:
                copy_file(
                    src_path=f'{src_img_dir}/{get_filename(frame.img_path)}',
                    dest_path=f'{dst_img_dir}/{get_filename(frame.img_path)}',
                    silent=True)
                if frame.cs_img_path:
                    copy_file(
                        src_path=f'{src_img_dir}/{get_filename(frame.cs_img_path)}',
                        dest_path=f'{dst_img_dir}/{get_filename(frame.cs_img_path)}',
                        silent=True)
                if frame.depth_img_path:
                    copy_file(
                        src_path=f'{src_img_dir}/{get_filename(frame.depth_img_path)}',
                        dest_path=f'{dst_img_dir}/{get_filename(frame.depth_img_path)}',
                        silent=True)
                if frame.is_img_path:
                    copy_file(
                        src_path=f'{src_img_dir}/{get_filename(frame.is_img_path)}',
                        dest_path=f'{dst_img_dir}/{get_filename(frame.is_img_path)}',
                        silent=True)
            frame.ndds_ann.save_to_path(save_path=save_path)
            if show_pbar:
                pbar.update()
        if show_pbar:
            pbar.close()
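
A minimal usage sketch for the method above, assuming `frame_handler` is an already-populated NDDS_Frame_Handler (the paths here are hypothetical):

frame_handler.save_to_dir(
    json_save_dir='ndds_json',         # annotation JSON files are written here
    src_img_dir='/data/ndds/capture',  # hypothetical source image directory
    dst_img_dir='ndds_img',            # images are copied here alongside the JSONs
    overwrite=True,                    # skip the delete-permission prompt
    show_pbar=True)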
Example #3
 def setup_directories(self, verbose: bool = False):
     make_dir_if_not_exists(self.dest_dir)
     for target_dir in [self.train_dir, self.test_dir, self.val_dir]:
         if dir_exists(target_dir):
             delete_all_files_in_dir(dir_path=target_dir, ask_permission=False, verbose=verbose)
         else:
             make_dir(target_dir)
         img_dir = f"{target_dir}/{self.target_img_dir}"
         ann_dir = f"{target_dir}/{self.target_ann_dir}"
         make_dir(img_dir)
         make_dir(ann_dir)
Example #4
    def save_to_dir(self,
                    json_save_dir: str,
                    src_img_dir: str,
                    overwrite: bool = False,
                    dst_img_dir: str = None,
                    show_pbar: bool = True):
        self._check_paths_valid(src_img_dir=src_img_dir)
        make_dir_if_not_exists(json_save_dir)
        delete_all_files_in_dir(json_save_dir, ask_permission=not overwrite)
        if dst_img_dir is not None:
            make_dir_if_not_exists(dst_img_dir)
            delete_all_files_in_dir(dst_img_dir, ask_permission=not overwrite)

        if show_pbar:
            pbar = tqdm(total=len(self), unit='ann(s)', leave=True)
            pbar.set_description(f'Saving {self.__class__.__name__}')
        for frame in self:
            save_path = f'{json_save_dir}/{get_rootname_from_path(frame.img_path)}.json'
            if dst_img_dir is not None:
                copy_file(
                    src_path=f'{src_img_dir}/{get_filename(frame.img_path)}',
                    dest_path=f'{dst_img_dir}/{get_filename(frame.img_path)}',
                    silent=True)
                if frame.cs_img_path:
                    copy_file(
                        src_path=f'{src_img_dir}/{get_filename(frame.cs_img_path)}',
                        dest_path=f'{dst_img_dir}/{get_filename(frame.cs_img_path)}',
                        silent=True)
                if frame.depth_img_path:
                    copy_file(
                        src_path=f'{src_img_dir}/{get_filename(frame.depth_img_path)}',
                        dest_path=f'{dst_img_dir}/{get_filename(frame.depth_img_path)}',
                        silent=True)
                if frame.is_img_path:
                    copy_file(
                        src_path=f'{src_img_dir}/{get_filename(frame.is_img_path)}',
                        dest_path=f'{dst_img_dir}/{get_filename(frame.is_img_path)}',
                        silent=True)
            frame.ndds_ann.save_to_path(save_path=save_path)
            if show_pbar:
                pbar.update()
        if show_pbar:
            pbar.close()
Example #5
    def save_to_dir(self, json_save_dir: str, src_img_dir: str, overwrite: bool = False, dst_img_dir: str = None):
        self._check_paths_valid(src_img_dir=src_img_dir)
        make_dir_if_not_exists(json_save_dir)
        delete_all_files_in_dir(json_save_dir, ask_permission=not overwrite)
        if dst_img_dir is not None:
            make_dir_if_not_exists(dst_img_dir)
            delete_all_files_in_dir(dst_img_dir, ask_permission=not overwrite)

        for ann in tqdm(self, total=len(self), unit='ann', leave=True):
            save_path = f'{json_save_dir}/{get_rootname_from_path(ann.img_path)}.json'
            src_img_path = f'{src_img_dir}/{get_filename(ann.img_path)}'
            if dst_img_dir is not None:
                dst_img_path = f'{dst_img_dir}/{get_filename(ann.img_path)}'
                copy_file(src_path=src_img_path, dest_path=dst_img_path, silent=True)
                ann.save_to_path(save_path=save_path, img_path=dst_img_path)
            else:
                ann.save_to_path(save_path=save_path, img_path=src_img_path)
Example #6
    def save_to_dir(self, json_save_dir: str, src_img_dir: str, overwrite: bool = False, dst_img_dir: str = None, show_pbar: bool = True):
        self._check_paths_valid(src_img_dir=src_img_dir)
        make_dir_if_not_exists(json_save_dir)
        delete_all_files_in_dir(json_save_dir, ask_permission=not overwrite)
        if dst_img_dir is not None:
            make_dir_if_not_exists(dst_img_dir)
            delete_all_files_in_dir(dst_img_dir, ask_permission=not overwrite)

        pbar = tqdm(total=len(self), unit='annotation(s)', leave=True) if show_pbar else None
        if pbar is not None:
            pbar.set_description('Writing Labelme Annotations')
        for ann in self:
            save_path = f'{json_save_dir}/{get_rootname_from_path(ann.img_path)}.json'
            src_img_path = f'{src_img_dir}/{get_filename(ann.img_path)}'
            if dst_img_dir is not None:
                dst_img_path = f'{dst_img_dir}/{get_filename(ann.img_path)}'
                copy_file(src_path=src_img_path, dest_path=dst_img_path, silent=True)
                ann.save_to_path(save_path=save_path, img_path=dst_img_path)
            else:
                ann.save_to_path(save_path=save_path, img_path=src_img_path)
            if pbar is not None:
                pbar.update()
        if pbar is not None:
            pbar.close()
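
A minimal usage sketch for the Labelme variant above, assuming `ann_handler` is an already-populated annotation handler (the paths here are hypothetical):

ann_handler.save_to_dir(
    json_save_dir='labelme_json',     # annotation JSON files are written here
    src_img_dir='/data/labelme/img',  # hypothetical source image directory
    dst_img_dir='labelme_img',        # images are copied next to the JSONs
    overwrite=True,
    show_pbar=True)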
Example #7
from logger import logger
from annotation_utils.ndds.structs import NDDS_Dataset
from annotation_utils.coco.structs import COCO_Dataset, COCO_Category_Handler
from common_utils.file_utils import make_dir_if_not_exists, delete_all_files_in_dir

src_dir = '/home/clayton/workspace/prj/data_keep/data/ndds/20200924_yagura5_bright_1-colored'
dst_dir = 'nihonbashi_debug'
make_dir_if_not_exists(dst_dir)
delete_all_files_in_dir(dst_dir, ask_permission=False)

hsr_categories = COCO_Category_Handler.load_from_path(
    '/home/clayton/workspace/prj/data_keep/data/toyota/dataset/config/categories/hsr_categories.json'
)

# Load NDDS Dataset
ndds_dataset = NDDS_Dataset.load_from_dir(json_dir=src_dir, show_pbar=True)

# Fix NDDS Dataset naming so that it follows convention. (This is not necessary if the NDDS dataset already follows the naming convention.)
for frame in ndds_dataset.frames:
    # Fix Naming Convention
    for ann_obj in frame.ndds_ann.objects:
        if ann_obj.class_name.lower() == 'nihonbashi':
            obj_type, obj_name = 'seg', 'hsr'
            instance_name = '0'
            ann_obj.class_name = f'{obj_type}_{obj_name}_{instance_name}'
        elif ann_obj.class_name.lower() in list('abcdefghijkl'):
            obj_type, obj_name = 'kpt', 'hsr'
            instance_name, contained_name = '0', ann_obj.class_name
            ann_obj.class_name = f'{obj_type}_{obj_name}_{instance_name}_{contained_name}'
        else:
            logger.error(f'Unknown ann_obj.class_name: {ann_obj.class_name}')

# NOTE: The conversion call was truncated in the original listing; the keyword
# arguments below presumably belonged to an NDDS-to-COCO conversion call such as
# COCO_Dataset.from_ndds. The call head is reconstructed here as an assumption.
dataset = COCO_Dataset.from_ndds(
    ndds_dataset=ndds_dataset,
    categories=hsr_categories,
    ignore_unspecified_categories=True,
    show_pbar=True,
    bbox_area_threshold=1,
    default_visibility_threshold=0.10,
    visibility_threshold_dict={'measure': 0.01},
    allow_unfound_seg=True,
    class_merge_map={
        'mark_10th_place': 'seg_measure',
        'marking_bottom': 'seg_measure',
        'marking_top': 'seg_measure',
        'hook': 'seg_measure'
    })

# Output Directories
make_dir_if_not_exists('measure_coco')
delete_all_files_in_dir('measure_coco')

measure_dir = 'measure_coco/measure'
whole_number_dir = 'measure_coco/whole_number'
digit_dir = 'measure_coco/digit'
json_output_filename = 'output.json'

measure_dataset, whole_number_dataset, digit_dataset = dataset.split_measure_dataset(
    measure_dir=measure_dir,
    whole_number_dir=whole_number_dir,
    digit_dir=digit_dir,
    allow_no_measures=True,
    allow_missing_parts=True)

if False:  # Change to True if you want to remove all segmentation from the measure dataset.
    from common_utils.common_types.segmentation import Segmentation
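    # The listing is cut off here. A hypothetical completion (API assumed):
    # replace each annotation's segmentation with an empty Segmentation so that
    # the measure dataset keeps only bounding boxes.
    for coco_ann in measure_dataset.annotations:
        coco_ann.segmentation = Segmentation()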
Example #9
from logger import logger
from annotation_utils.ndds.structs import NDDS_Dataset
from annotation_utils.coco.structs import COCO_Dataset, COCO_Category_Handler
from common_utils.file_utils import make_dir_if_not_exists, delete_all_files_in_dir

src_root_dir = '/home/doors/workspace/oobayashi/hsr_data_4k'
targets = [
    '20200924_yagura5_bright_2-colored', 'yagura5_dark_2', 'yagura5_bright_2',
    'yagura5_dark_1', '20200924_yagura5_dark_2-colored',
    '20200924_yagura5_bright_1-colored', '20200924_yagura5_dark_1-colored',
    'yagura5_bright_1'
]
dst_root_dir = '/home/doors/workspace/prj/data_keep/data/toyota/dataset/sim/20200928'
video_preview_dir = f'{dst_root_dir}/preview'
make_dir_if_not_exists(video_preview_dir)
delete_all_files_in_dir(video_preview_dir, ask_permission=False)

hsr_categories = COCO_Category_Handler.load_from_path(
    '/home/doors/workspace/prj/data_keep/data/toyota/dataset/config/categories/hsr_categories.json'
)

for target in targets:
    src_target_dir = f'{src_root_dir}/{target}'
    dst_target_dir = f'{dst_root_dir}/{target}'
    make_dir_if_not_exists(dst_target_dir)
    delete_all_files_in_dir(dst_target_dir, ask_permission=False)

    # Load NDDS Dataset
    ndds_dataset = NDDS_Dataset.load_from_dir(json_dir=src_target_dir,
                                              show_pbar=True)
Example #10
from annotation_utils.coco.structs import COCO_Category_Handler, COCO_Category
from annotation_utils.coco.structs import COCO_Dataset
from annotation_utils.ndds.structs import NDDS_Dataset  # required for NDDS_Dataset.load_from_dir below
from common_utils.file_utils import make_dir_if_not_exists, delete_all_files_in_dir
from typing import cast
from annotation_utils.coco.structs import COCO_Image, COCO_Annotation
from common_utils.path_utils import get_rootname_from_filename, get_extension_from_filename
from common_utils.common_types.point import Point2D
from tqdm import tqdm
import cv2
from logger import logger
from typing import List

target_src_dir = '/home/clayton/workspace/prj/data_keep/data/ndds/bolt_markMap_2020.08.18-13.03.35'
target_dst_dir = 'bolt_kpt'
make_dir_if_not_exists(target_dst_dir)
delete_all_files_in_dir(target_dst_dir)

# Load NDDS Dataset
logger.info('Loading NDDS Dataset')
ndds_dataset = NDDS_Dataset.load_from_dir(
    json_dir=target_src_dir,
    show_pbar=True
)
delete_idx_list = []


# Fix NDDS Dataset naming so that it follows convention. (This is not necessary if the NDDS dataset already follows the naming convention.)
for i, frame in enumerate(ndds_dataset.frames):
    for ann_obj in frame.ndds_ann.objects:
        if ann_obj.class_name.startswith('bolt'):
            if ann_obj.visibility == 0:
                # The listing is truncated here; presumably frames containing a
                # fully occluded bolt are queued for removal (assumed completion).
                delete_idx_list.append(i)
Example #11
import cv2
from annotation_utils.ndds.structs import NDDS_Dataset
from common_utils.file_utils import make_dir_if_not_exists, delete_all_files_in_dir
from common_utils.cv_drawing_utils import draw_bbox
from streamer.cv_viewer import cv_simple_image_viewer

# Load NDDS Dataset
ndds_dataset = NDDS_Dataset.load_from_dir(
    json_dir='/home/clayton/workspace/prj/data_keep/data/ndds/m1_200',
    show_pbar=True)

vis_dump_dir = 'vis_dump'
make_dir_if_not_exists(vis_dump_dir)
delete_all_files_in_dir(vis_dump_dir)

for frame in ndds_dataset.frames:
    img = cv2.imread(frame.img_path)
    for ndds_obj in frame.ndds_ann.objects:
        img = draw_bbox(img=img, bbox=ndds_obj.bounding_box)
    quit_flag = cv_simple_image_viewer(img=img, preview_width=1000)
    if quit_flag:
        break
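
The loop above creates vis_dump_dir but never writes to it. A minimal sketch that dumps each annotated frame to that directory instead of opening a preview window, assuming image filenames are unique across the dataset:

import os

for frame in ndds_dataset.frames:
    img = cv2.imread(frame.img_path)
    for ndds_obj in frame.ndds_ann.objects:
        img = draw_bbox(img=img, bbox=ndds_obj.bounding_box)
    # Write the visualization to disk.
    cv2.imwrite(os.path.join(vis_dump_dir, os.path.basename(frame.img_path)), img)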
Example #12
 def _prep_output_dir(self, measure_dir: str, whole_number_dir: str,
                      digit_dir: str):
     for output_dir in [measure_dir, whole_number_dir, digit_dir]:
         make_dir_if_not_exists(output_dir)
         delete_all_files_in_dir(output_dir, ask_permission=False)
Example #13
def prepare_datasets_from_dir(
        scenario_root_dir: str,
        dst_root_dir: str,
        annotation_filename: str = 'output.json',
        skip_existing: bool = False,
        val_target_proportion: float = 0.05,
        min_val_size: int = None,
        max_val_size: int = None,
        orig_config_save: str = 'orig.yaml',
        reorganized_config_save: str = 'dataset_config.yaml',
        show_pbar: bool = True):
    """
    Parameters:
        scenario_root_dir - Path to the source root directory containing all of your scenario folders. [Required]
        dst_root_dir - Path to where you would like to save your prepared scenario datasets (split into train and val) [Required]
        annotation_filename - The filename of every annotation file under scenario_root_dir [Default: 'output.json']
        skip_existing - If you terminated dataset preparation midway, you can skip the scenarios that were already made using skip_existing=True. [Default: False]
        val_target_proportion - The proportion of your scenario that you would like to allocate to validation. [Default: 0.05]
        min_val_size - The minimum number of images that you would like to use for validation. [Default: None]
        max_val_size - The maximum number of images that you would like to use for validation. [Default: None]
        orig_config_save - Where you would like to save the dataset configuration representing your scenario_root_dir. [Default: 'orig.yaml']
        reorganized_config_save - Where you would like to save the dataset configuration representing your dst_root_dir. [Default: 'dataset_config.yaml']
        show_pbar - Whether or not you would like to show a progress bar during preparation. [Default: True]

    Description:
        The datasets under each scenario directory will be combined and then split into a train + validation folder.
        The source root directory should have the following structure:
            scenario_root_dir
                scenario0
                    scenario0_part0
                    scenario0_part1
                    scenario0_part2
                    ...
                scenario1
                    scenario1_part0
                    scenario1_part1
                    scenario1_part2
                    ...
                scenario2
                    scenario2_part0
                    scenario2_part1
                    scenario2_part2
                    ...
                ...
        Note that there is no restriction on directory names; they can be anything.
        This method reads a fixed directory structure regardless of the directory names.
        Also note that the COCO annotation file saved in each scenario part directory must have the same filename.
        If you need a more flexible approach for preparing your datasets, please define where your datasets are located in an Excel sheet
        and use prepare_datasets_from_excel instead.

        The destination root directory will have the following structure:
            dst_root_dir
                scenario0
                    train
                    val
                scenario1
                    train
                    val
                scenario2
                    train
                    val
                ...
        
        The dataset configuration file saved at reorganized_config_save will reflect the directory structure of dst_root_dir.
        The configuration file representing the directory structure of the scenario_root_dir is saved under orig_config_save.

        Note that orig_config_save and reorganized_config_save do not have to be inside of dst_root_dir.
        In fact, it is recommended not to save orig_config_save and reorganized_config_save inside of dst_root_dir.
        It is also recommended that you change the path of orig_config_save and reorganized_config_save every time you make an addition to your datasets.
        This is because you will likely want to keep track of the previous states of your dataset configuration, and you
        may also want to roll back to a previous configuration at any given time.
    """
    make_dir_if_not_exists(dst_root_dir)
    if get_dir_contents_len(dst_root_dir) > 0 and not skip_existing:
        print(
            f'Directory {dst_root_dir} is not empty.\nAre you sure you want to delete the contents?'
        )
        answer = input('yes/no')
        if answer.lower() in ['yes', 'y']:
            delete_all_files_in_dir(dst_root_dir)
        elif answer.lower() in ['no', 'n']:
            print('Terminating program.')
            sys.exit()
        else:
            raise ValueError(f'Invalid answer: {answer}')

    # Gather datasets from source root directory and combine.
    scenario_names = get_dirnames_in_dir(scenario_root_dir)
    scenario_datasets = cast(List[COCO_Dataset], [])
    orig_collection_handler = DatasetConfigCollectionHandler()
    pbar = tqdm(total=len(scenario_names),
                unit='scenario(s)') if show_pbar else None
    if pbar is not None:
        pbar.set_description('Gathering Scenarios')
    for scenario_name in scenario_names:
        orig_scenario_collection = DatasetConfigCollection(tag=scenario_name)
        src_scenario_dir = f'{scenario_root_dir}/{scenario_name}'
        part_names = get_dirnames_in_dir(src_scenario_dir)
        part_datasets = cast(List[COCO_Dataset], [])
        part_dataset_dirs = cast(List[str], [])
        for part_name in part_names:
            src_part_dir = f'{src_scenario_dir}/{part_name}'
            src_part_ann_path = f'{src_part_dir}/{annotation_filename}'
            part_dataset = COCO_Dataset.load_from_path(
                json_path=src_part_ann_path, img_dir=src_part_dir)
            part_datasets.append(part_dataset)
            part_dataset_dirs.append(src_part_dir)
            orig_scenario_part_config = DatasetConfig(
                img_dir=src_part_dir,
                ann_path=src_part_ann_path,
                ann_format='coco',
                tag=part_name)
            orig_scenario_collection.append(orig_scenario_part_config)
        scenario_dataset = COCO_Dataset.combine(dataset_list=part_datasets,
                                                img_dir_list=part_dataset_dirs,
                                                show_pbar=False)
        scenario_datasets.append(scenario_dataset)
        orig_collection_handler.append(orig_scenario_collection)
        if pbar is not None:
            pbar.update()
    orig_collection_handler.save_to_path(orig_config_save, overwrite=True)
    if pbar is not None:
        pbar.close()

    # Split combined scenario datasets into train and val and save them.
    train_collection = DatasetConfigCollection(tag='train')
    val_collection = DatasetConfigCollection(tag='val')
    pbar = tqdm(total=len(scenario_names)) if show_pbar else None
    if pbar is not None:
        pbar.set_description('Splitting Scenarios Into Train/Val')
    for i in range(len(scenario_names)):
        dst_scenario_dir = f'{dst_root_dir}/{scenario_names[i]}'
        if dir_exists(dst_scenario_dir):
            if skip_existing:
                if pbar is not None:
                    pbar.update()
                continue
            else:
                raise FileExistsError(
                    f'Directory already exists: {dst_scenario_dir}')
        else:
            make_dir_if_not_exists(dst_scenario_dir)
        orig_num_images = len(scenario_datasets[i].images)
        assert orig_num_images >= 2, f'{scenario_names[i]} has only {orig_num_images} images, and thus cannot be split into train and val.'
        num_val = int(len(scenario_datasets[i].images) * val_target_proportion)
        num_val = 1 if num_val == 0 else num_val
        num_val = min_val_size if min_val_size is not None and num_val < min_val_size else num_val
        num_val = max_val_size if max_val_size is not None and num_val > max_val_size else num_val
        num_train = orig_num_images - num_val
        train_dataset, val_dataset = scenario_datasets[i].split_into_parts(
            ratio=[num_train, num_val], shuffle=True)

        dst_train_dir = f'{dst_scenario_dir}/train'
        make_dir_if_not_exists(dst_train_dir)
        train_dataset.move_images(dst_img_dir=dst_train_dir,
                                  preserve_filenames=False,
                                  update_img_paths=True,
                                  show_pbar=False)
        train_ann_path = f'{dst_train_dir}/output.json'
        train_dataset.save_to_path(train_ann_path, overwrite=True)
        train_dataset_config = DatasetConfig(img_dir=dst_train_dir,
                                             ann_path=train_ann_path,
                                             ann_format='coco',
                                             tag=f'{scenario_names[i]}_train')
        train_collection.append(train_dataset_config)

        dst_val_dir = f'{dst_scenario_dir}/val'
        make_dir_if_not_exists(dst_val_dir)
        val_dataset.move_images(dst_img_dir=dst_val_dir,
                                preserve_filenames=False,
                                update_img_paths=True,
                                show_pbar=False)
        val_ann_path = f'{dst_val_dir}/output.json'
        val_dataset.save_to_path(val_ann_path, overwrite=True)
        val_dataset_config = DatasetConfig(img_dir=dst_val_dir,
                                           ann_path=val_ann_path,
                                           ann_format='coco',
                                           tag=f'{scenario_names[i]}_val')
        val_collection.append(val_dataset_config)
        if pbar is not None:
            pbar.update()
    if pbar is not None:
        pbar.close()
    collection_handler = DatasetConfigCollectionHandler(
        [train_collection, val_collection])
    collection_handler.save_to_path(reorganized_config_save, overwrite=True)
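
A hedged invocation sketch for prepare_datasets_from_dir; the paths and validation size limits below are assumptions, not values from the source:

prepare_datasets_from_dir(
    scenario_root_dir='/data/scenarios',  # hypothetical source root
    dst_root_dir='/data/prepared',        # hypothetical destination root
    annotation_filename='output.json',
    val_target_proportion=0.05,           # ~5% of each scenario goes to val
    min_val_size=5,
    max_val_size=50,
    orig_config_save='configs/orig.yaml',
    reorganized_config_save='configs/dataset_config.yaml',
    show_pbar=True)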
Example #14
    def move(self,
             dst_dataroot: str,
             include_depth: bool = True,
             include_RT: bool = False,
             camera_path: str = None,
             fps_path: str = None,
             preserve_filename: bool = False,
             use_softlink: bool = False,
             ask_permission_on_delete: bool = True,
             show_pbar: bool = True):
        make_dir_if_not_exists(dst_dataroot)
        delete_all_files_in_dir(dst_dataroot,
                                ask_permission=ask_permission_on_delete,
                                verbose=False)
        processed_image_id_list = []
        pbar = tqdm(total=len(self.annotations),
                    unit='annotation(s)',
                    leave=True) if show_pbar else None
        if pbar is not None:
            pbar.set_description('Moving Linemod Dataset Data')
        for linemod_ann in self.annotations:
            if not dir_exists(linemod_ann.data_root):
                raise FileNotFoundError(
                    f"Couldn't find data_root at {linemod_ann.data_root}")

            # Images
            linemod_image = self.images.get(id=linemod_ann.image_id)[0]
            if linemod_image.id not in processed_image_id_list:
                img_path = f'{linemod_ann.data_root}/{get_filename(linemod_image.file_name)}'
                if not file_exists(img_path):
                    raise FileNotFoundError(
                        f"Couldn't find image at {img_path}")
                if preserve_filename:
                    dst_img_path = f'{dst_dataroot}/{get_filename(linemod_image.file_name)}'
                    if file_exists(dst_img_path):
                        raise FileExistsError(f"""
                            Image already exists at {dst_img_path}
                            Hint: Use preserve_filename=False to bypass this error.
                            """)
                else:
                    dst_filename = f'{linemod_image.id}.{get_extension_from_filename(linemod_image.file_name)}'
                    linemod_image.file_name = dst_filename
                    dst_img_path = f'{dst_dataroot}/{dst_filename}'
                if not use_softlink:
                    copy_file(src_path=img_path,
                              dest_path=dst_img_path,
                              silent=True)
                else:
                    create_softlink(src_path=rel_to_abs_path(img_path),
                                    dst_path=rel_to_abs_path(dst_img_path))
                processed_image_id_list.append(linemod_image.id)

            # Masks
            if not file_exists(linemod_ann.mask_path):
                raise FileNotFoundError(
                    f"Couldn't find mask at {linemod_ann.mask_path}")
            mask_path = linemod_ann.mask_path
            if preserve_filename:
                dst_mask_path = f'{dst_dataroot}/{get_filename(linemod_ann.mask_path)}'
                if file_exists(dst_mask_path):
                    raise FileExistsError(f"""
                        Mask already exists at {dst_mask_path}
                        Hint: Use preserve_filename=False to bypass this error.
                        """)
            else:
                mask_filename = get_filename(linemod_ann.mask_path)
                dst_filename = f'{linemod_ann.id}_mask.{get_extension_from_filename(mask_filename)}'
                dst_mask_path = f'{dst_dataroot}/{dst_filename}'
                linemod_ann.mask_path = dst_mask_path
            if not use_softlink:
                copy_file(src_path=mask_path,
                          dest_path=dst_mask_path,
                          silent=True)
            else:
                create_softlink(src_path=rel_to_abs_path(mask_path),
                                dst_path=rel_to_abs_path(dst_mask_path))

            # Depth
            if include_depth and linemod_ann.depth_path is not None:
                if not file_exists(linemod_ann.depth_path):
                    raise FileNotFoundError(
                        f"Couldn't find depth at {linemod_ann.depth_path}")
                depth_path = linemod_ann.depth_path
                if preserve_filename:
                    dst_depth_path = f'{dst_dataroot}/{get_filename(linemod_ann.depth_path)}'
                    if file_exists(dst_depth_path):
                        raise FileExistsError(f"""
                            Depth already exists at {dst_depth_path}
                            Hint: Use preserve_filename=False to bypass this error.
                            """)
                else:
                    depth_filename = get_filename(linemod_ann.depth_path)
                    dst_filename = f'{linemod_ann.id}_depth.{get_extension_from_filename(depth_filename)}'
                    dst_depth_path = f'{dst_dataroot}/{dst_filename}'
                    linemod_ann.depth_path = dst_depth_path
                if not use_softlink:
                    copy_file(src_path=depth_path,
                              dest_path=dst_depth_path,
                              silent=True)
                else:
                    create_softlink(src_path=rel_to_abs_path(depth_path),
                                    dst_path=rel_to_abs_path(dst_depth_path))

            # RT pickle files
            if include_RT:
                rootname = get_rootname_from_path(mask_path)
                if rootname.endswith('_mask'):
                    rootname = rootname.replace('_mask', '')
                rt_filename = f'{rootname}_RT.pkl'
                rt_path = f'{linemod_ann.data_root}/{rt_filename}'
                if not file_exists(rt_path):
                    raise FileNotFoundError(
                        f"Couldn't find RT pickle file at {rt_path}")
                if preserve_filename:
                    dst_rt_path = f'{dst_dataroot}/{rt_filename}'
                    if file_exists(dst_rt_path):
                        raise FileExistsError(f"""
                            RT pickle file already exists at {dst_rt_path}
                            Hint: Use preserve_filename=False to bypass this error.
                            """)
                else:
                    dst_rt_filename = f'{linemod_ann.id}_RT.pkl'
                    dst_rt_path = f'{dst_dataroot}/{dst_rt_filename}'
                if not use_softlink:
                    copy_file(src_path=rt_path,
                              dest_path=dst_rt_path,
                              silent=True)
                else:
                    create_softlink(src_path=rel_to_abs_path(rt_path),
                                    dst_path=rel_to_abs_path(dst_rt_path))
            if pbar is not None:
                pbar.update()
        # Camera setting
        if camera_path is not None:
            if not file_exists(camera_path):
                raise FileNotFoundError(
                    f"Couldn't find camera settings at {camera_path}")
            dst_camera_path = f'{dst_dataroot}/{get_filename(camera_path)}'
            if file_exists(dst_camera_path):
                raise FileExistsError(
                    f'Camera settings already saved at {dst_camera_path}')
            if not use_softlink:
                copy_file(src_path=camera_path,
                          dest_path=dst_camera_path,
                          silent=True)
            else:
                create_softlink(src_path=rel_to_abs_path(camera_path),
                                dst_path=rel_to_abs_path(dst_camera_path))

        # FPS setting
        if fps_path is not None:
            if not file_exists(fps_path):
                raise FileNotFoundError(
                    f"Couldn't find FPS settings at {fps_path}")
            dst_fps_path = f'{dst_dataroot}/{get_filename(fps_path)}'
            if file_exists(dst_fps_path):
                raise FileExistsError(
                    f'FPS settings already saved at {dst_fps_path}')
            if not use_softlink:
                copy_file(src_path=fps_path,
                          dest_path=dst_fps_path,
                          silent=True)
            else:
                create_softlink(src_path=rel_to_abs_path(fps_path),
                                dst_path=rel_to_abs_path(dst_fps_path))
        if pbar is not None:
            pbar.close()
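
A hedged usage sketch for the move method above, assuming `linemod_dataset` is an already-loaded Linemod dataset object (the settings-file paths are hypothetical):

linemod_dataset.move(
    dst_dataroot='linemod_prepared',
    include_depth=True,
    include_RT=True,
    camera_path='camera_settings.json',  # hypothetical camera settings file
    fps_path='fps.json',                 # hypothetical FPS settings file
    preserve_filename=False,             # rename files by id to avoid collisions
    use_softlink=False,                  # copy instead of symlinking
    ask_permission_on_delete=False,
    show_pbar=True)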
Example #15
def gen_infer_comparison(gt: BasicLoadableHandler,
                         dt: BasicLoadableHandler,
                         error: BasicLoadableHandler,
                         model_names: List[str],
                         test_names: List[str],
                         collage_shape: Tuple[int, int],  # requires Tuple from typing
                         test_img_dir_map: Dict[str, str] = None,
                         model_aliases: Dict[str, str] = None,
                         test_aliases: Dict[str, str] = None,
                         video_save: str = None,
                         img_dump_dir: str = None,
                         show_preview: bool = False,
                         show_pbar: bool = True,
                         draw_settings=None,
                         draw_inference: bool = False,
                         details_func=None,
                         debug_verbose: bool = False):
    for handler in [gt, dt, error]:
        assert isinstance(handler, BasicLoadableHandler)
        for attr_key in ['frame', 'test_name']:
            assert hasattr(handler[0], attr_key)
    for handler in [dt, error]:
        assert hasattr(handler[0], 'model_name')
    model_names0 = list(set([datum.model_name for datum in dt
                             ])) if model_names == 'all' else model_names
    test_names0 = list(set([datum.test_name for datum in gt
                            ])) if test_names == 'all' else test_names
    for val_list in [model_names0, test_names0]:
        if val_list != 'all':
            assert isinstance(val_list, (tuple, list))
            for val in val_list:
                assert isinstance(val, str)
    assert isinstance(collage_shape, (tuple, list))
    for val in collage_shape:
        assert isinstance(val, int)
    assert len(collage_shape) == 2
    assert len(model_names0) <= collage_shape[0] * collage_shape[1]
    if img_dump_dir is not None:
        make_dir_if_not_exists(img_dump_dir)
        delete_all_files_in_dir(img_dump_dir, ask_permission=False)
    if test_img_dir_map is None:
        test_img_dir_map0 = {test_name: test_name for test_name in test_names0}
    else:
        assert isinstance(test_img_dir_map, dict)
        for key, val in test_img_dir_map.items():
            assert key in test_names0
            assert isinstance(key, str)
            assert isinstance(val, str)
        test_img_dir_map0 = {
            test_name: (test_img_dir_map[test_name] if test_name in test_img_dir_map else test_name) \
            for test_name in test_names0
        }
    for test_name, img_dir in test_img_dir_map0.items():
        if not dir_exists(img_dir):
            raise FileNotFoundError(f"""
                Couldn't find image directory {img_dir} for {test_name}.
                Please modify test_img_dir_map to match the image directory path for {test_name}.
                test_img_dir_map: {test_img_dir_map0}
                """)
    stream_writer = StreamWriter(show_preview=show_preview,
                                 video_save_path=video_save,
                                 dump_dir=img_dump_dir)

    total_images = len(gt.get(test_name=test_names0))
    pbar = tqdm(total=total_images, unit='image(s)',
                leave=True) if show_pbar else None
    if pbar is not None:
        pbar.set_description('Generating Comparison')
    for test_name in test_names0:
        if img_dump_dir is not None:
            test_img_dump_dir = f'{img_dump_dir}/{test_name}'
            make_dir_if_not_exists(test_img_dump_dir)
            stream_writer.dump_writer._save_dir = test_img_dump_dir

        img_dir = test_img_dir_map0[test_name]
        gt_test_data = gt.get(test_name=test_name)
        gt_test_data.sort(attr_name='frame')
        dt_test_data = dt.get(test_name=test_name)
        dt_test_data.sort(attr_name='frame')
        error_test_data = error.get(test_name=test_name)
        error_test_data.sort(attr_name='frame')

        for gt_datum in gt_test_data:
            file_name = gt_datum.frame
            img_path = f'{img_dir}/{file_name}'
            if not file_exists(img_path):
                if debug_verbose:
                    print(f"""
                        Couldn't find image. Skipping.
                            test_name: {test_name}
                            img_path: {img_path}
                        """)
                if pbar is not None:
                    pbar.update()
                continue
            img = cv2.imread(img_path)
            dt_frame_data = dt_test_data.get(frame=gt_datum.frame)
            error_frame_data = error_test_data.get(frame=gt_datum.frame)

            img_buffer = cast(List[np.ndarray], [])
            for model_name in model_names0:
                dt_model_data = dt_frame_data.get(model_name=model_name)
                dt_model_datum = dt_model_data[0] if len(
                    dt_model_data) > 0 else None
                error_model_data = error_frame_data.get(model_name=model_name)
                error_datum = error_model_data[0] if len(
                    error_model_data) > 0 else None

                result = img.copy()
                if draw_inference or draw_settings is not None:
                    if draw_settings is not None:
                        if dt_model_datum is not None:
                            result = dt_model_datum.draw(
                                result, settings=draw_settings)
                    else:
                        if dt_model_datum is not None:
                            result = dt_model_datum.draw(result)

                if test_aliases is not None and gt_datum.test_name in test_aliases:
                    test_text = test_aliases[gt_datum.test_name]
                else:
                    test_text = gt_datum.test_name if gt_datum is not None else None
                if model_aliases is not None and dt_model_datum is not None and dt_model_datum.model_name in model_aliases:
                    model_text = model_aliases[dt_model_datum.model_name]
                else:
                    model_text = dt_model_datum.model_name if dt_model_datum is not None else None

                if details_func is not None:
                    for key in ['gt', 'dt', 'error']:
                        assert key in details_func.__annotations__, f'{details_func.__name__} must have a {key} parameter.'
                    details_func_params = {
                        'img': result,
                        'gt': gt_datum,
                        'dt': dt_model_datum,
                        'error': error_datum
                    }
                    suggested_params = {
                        'test_text': test_text,
                        'model_text': model_text,
                        'frame_text': gt_datum.frame
                    }
                    for key, val in suggested_params.items():
                        if key in details_func.__annotations__:
                            details_func_params[key] = val
                    result = details_func(**details_func_params)
                else:
                    row_text_list = [
                        f'Test: {test_text}', f'Model: {model_text}',
                        f'Frame: {gt_datum.frame}'
                    ]
                    result_h, result_w = result.shape[:2]
                    combined_row_height = len(row_text_list) * 0.04 * result_h
                    result = draw_text_rows_at_point(
                        img=result,
                        row_text_list=row_text_list,
                        x=result_w * 0.01,
                        y=result_h * 0.01,
                        combined_row_height=combined_row_height)
                img_buffer.append(result)
            collage_img = collage_from_img_buffer(img_buffer=img_buffer,
                                                  collage_shape=collage_shape)
            stream_writer.step(img=collage_img, file_name=file_name)
            if pbar is not None:
                pbar.update()
    if pbar is not None:
        pbar.close()
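
A hedged invocation sketch for gen_infer_comparison; the handler variables, model/test names, and paths below are assumptions for illustration:

gen_infer_comparison(
    gt=gt_handler, dt=dt_handler, error=error_handler,
    model_names=['model_a', 'model_b'],             # hypothetical model names
    test_names='all',
    collage_shape=(1, 2),                           # one row, two models side by side
    test_img_dir_map={'test0': '/data/test0/img'},  # hypothetical test-to-directory mapping
    video_save='comparison.avi',
    img_dump_dir='comparison_dump',
    show_pbar=True)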