# Imports needed by this snippet (the packnet_sfm module paths below are
# assumed from the repository layout; os and ConcatDataset are standard):
import os

from torch.utils.data import ConcatDataset

from packnet_sfm.datasets.transforms import get_transforms
from packnet_sfm.utils.horovod import print0
from packnet_sfm.utils.logging import pcolor


def setup_dataset(config, mode, requirements, **kwargs):
    """
    Create datasets for the given mode

    Parameters
    ----------
    config : CfgNode
        Configuration (cf. configs/default_config.py)
    mode : str {'train', 'validation', 'test'}
        Mode from which we want the dataset
    requirements : dict (string -> bool)
        Different requirements for dataset loading (gt_depth, gt_pose, etc)
    kwargs : dict
        Extra parameters for dataset creation

    Returns
    -------
    datasets : list of Dataset
        List of datasets for that mode (None if no dataset path is given)
    """
    # If no dataset is given, return None
    if len(config.path) == 0:
        return None

    print0(pcolor('###### Setup %s datasets' % mode, 'red'))

    # Global shared dataset arguments
    dataset_args = {
        'back_context': config.back_context,
        'forward_context': config.forward_context,
        'data_transform': get_transforms(mode, **kwargs)
    }

    # Loop over all datasets
    datasets = []
    for i in range(len(config.split)):
        path_split = os.path.join(config.path[i], config.split[i])

        # Individual shared dataset arguments
        dataset_args_i = {
            'depth_type': config.depth_type[i] if requirements['gt_depth'] else None,
            'input_depth_type': config.input_depth_type[i] if requirements['gt_depth'] else None,
            'with_pose': requirements['gt_pose'],
        }

        # KITTI dataset
        if config.dataset[i] == 'KITTI':
            from packnet_sfm.datasets.kitti_dataset import KITTIDataset
            dataset = KITTIDataset(
                config.path[i], path_split,
                **dataset_args, **dataset_args_i,
            )
        # DGP dataset
        elif config.dataset[i] == 'DGP':
            from packnet_sfm.datasets.dgp_dataset import DGPDataset
            dataset = DGPDataset(
                config.path[i], config.split[i],
                **dataset_args, **dataset_args_i,
                cameras=config.cameras[i],
            )
        # Image dataset
        elif config.dataset[i] == 'Image':
            from packnet_sfm.datasets.image_dataset import ImageDataset
            dataset = ImageDataset(
                config.path[i], config.split[i],
                **dataset_args, **dataset_args_i,
            )
        else:
            raise ValueError('Unknown dataset %s' % config.dataset[i])

        # Repeat if needed
        if 'repeat' in config and config.repeat[i] > 1:
            dataset = ConcatDataset([dataset for _ in range(config.repeat[i])])
        datasets.append(dataset)

        # Display dataset information
        bar = '######### {:>7}'.format(len(dataset))
        if 'repeat' in config:
            bar += ' (x{})'.format(config.repeat[i])
        bar += ': {:<}'.format(path_split)
        print0(pcolor(bar, 'yellow'))

    # If training, concatenate all datasets into a single one
    if mode == 'train':
        datasets = [ConcatDataset(datasets)]

    return datasets
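
A minimal, hypothetical usage sketch for the snippet above: the config fields are only the ones the function reads (the real schema lives in configs/default_config.py), the paths and values are placeholders, and the yacs-style CfgNode is assumed from the docstring.

from yacs.config import CfgNode

# Hypothetical single-dataset configuration; field names mirror what
# setup_dataset() reads above, values are placeholders only.
config = CfgNode()
config.path = ['/data/datasets/KITTI_raw']       # placeholder dataset root
config.split = ['splits/train_files.txt']        # placeholder split file (relative to path)
config.dataset = ['KITTI']
config.depth_type = ['velodyne']                 # placeholder depth type
config.input_depth_type = ['']                   # no input depth
config.back_context = 1
config.forward_context = 1
config.repeat = [1]

requirements = {'gt_depth': True, 'gt_pose': False}

# Transform/augmentation parameters would normally be forwarded through
# **kwargs to get_transforms(mode, **kwargs); omitted here for brevity.
train_datasets = setup_dataset(config, 'train', requirements)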
Example #2
def setup_dataset(config, mode, requirements, **kwargs):
    """
    Create datasets for the given mode

    Parameters
    ----------
    config : CfgNode
        Configuration (cf. configs/default_config.py)
    mode : str {'train', 'validation', 'test'}
        Mode from which we want the dataset
    requirements : dict (string -> bool)
        Different requirements for dataset loading (gt_depth, gt_pose, etc)
    kwargs : dict
        Extra parameters for dataset creation

    Returns
    -------
    datasets : list of Dataset
        List of datasets for that mode (None if no dataset path is given)
    """
    # If no dataset is given, return None
    if len(config.path) == 0:
        return None

    print0(pcolor('###### Setup %s datasets' % mode, 'red'))

    # Global shared dataset arguments
    dataset_args = {
        'back_context': config.back_context,
        'forward_context': config.forward_context,
        'with_geometric_context': config.with_geometric_context,
    }

    # Loop over all datasets
    datasets = []
    for i in range(len(config.split)):
        path_split = os.path.join(config.path[i], config.split[i])

        # Individual shared dataset arguments
        if config.dataset[i] == 'ValeoMultifocal':
            dataset_args_i = {
                'depth_type':
                config.depth_type[i] if requirements['gt_depth'] else None,
                'with_pose':
                requirements['gt_pose'],
                'data_transform':
                get_transforms_multifocal(mode, **kwargs),
                'with_spatiotemp_context':
                config.with_spatiotemp_context,
            }
        elif config.dataset[i] == 'KITTIValeoFisheye':
            dataset_args_i = {
                'depth_type':
                config.depth_type[i] if requirements['gt_depth'] else None,
                'with_pose':
                requirements['gt_pose'],
                'data_transform':
                get_transforms_fisheye(mode, **kwargs),
                'calibrations_suffix':
                config.calibrations_suffix,
                'depth_suffix':
                config.depth_suffix,
                'cam_convs':
                config.cam_convs
            }
        elif config.dataset[i] == 'KITTIValeoDistorted':
            dataset_args_i = {
                'depth_type':
                config.depth_type[i] if requirements['gt_depth'] else None,
                'with_pose':
                requirements['gt_pose'],
                'data_transform':
                get_transforms_distorted(mode, **kwargs)
            }
        elif config.dataset[i] == 'DGPvaleo':
            dataset_args_i = {
                'depth_type':
                config.depth_type[i] if requirements['gt_depth'] else None,
                'with_pose':
                requirements['gt_pose'],
                'data_transform':
                get_transforms_dgp_valeo(mode, **kwargs)
            }
        elif config.dataset[i] == 'WoodscapeFisheye':
            dataset_args_i = {
                'depth_type':
                config.depth_type[i] if requirements['gt_depth'] else None,
                'with_pose':
                requirements['gt_pose'],
                'data_transform':
                get_transforms_woodscape_fisheye(mode, **kwargs)
            }
        else:
            dataset_args_i = {
                'depth_type':
                config.depth_type[i] if requirements['gt_depth'] else None,
                'with_pose':
                requirements['gt_pose'],
                'data_transform':
                get_transforms(mode, **kwargs)
            }

        if config.dataset[i] == 'ValeoMultifocal':
            from packnet_sfm.datasets.kitti_based_valeo_dataset_multifocal import KITTIBasedValeoDatasetMultifocal
            dataset = KITTIBasedValeoDatasetMultifocal(
                config.path[i],
                path_split,
                **dataset_args,
                **dataset_args_i,
                cameras=config.cameras[i],
            )
        # KITTI dataset
        elif config.dataset[i] == 'KITTI':
            from packnet_sfm.datasets.kitti_dataset import KITTIDataset
            dataset = KITTIDataset(
                config.path[i],
                path_split,
                **dataset_args,
                **dataset_args_i,
            )
        # DGP dataset
        elif config.dataset[i] == 'DGP':
            from packnet_sfm.datasets.dgp_dataset import DGPDataset
            dataset = DGPDataset(
                config.path[i],
                config.split[i],
                **dataset_args,
                **dataset_args_i,
                cameras=config.cameras[i],
            )
        # DGP Valeo dataset
        elif config.dataset[i] == 'DGPvaleo':
            from packnet_sfm.datasets.dgp_valeo_dataset import DGPvaleoDataset
            dataset = DGPvaleoDataset(
                config.path[i],
                config.split[i],
                **dataset_args,
                **dataset_args_i,
                cameras=config.cameras[i],
            )
        # Image dataset
        elif config.dataset[i] == 'Image':
            from packnet_sfm.datasets.image_dataset import ImageDataset
            dataset = ImageDataset(
                config.path[i],
                config.split[i],
                **dataset_args,
                **dataset_args_i,
            )
        # KITTI-based Valeo dataset
        elif config.dataset[i] == 'KITTIValeo':
            from packnet_sfm.datasets.kitti_based_valeo_dataset import KITTIBasedValeoDataset
            dataset = KITTIBasedValeoDataset(
                config.path[i],
                path_split,
                **dataset_args,
                **dataset_args_i,
                cameras=config.cameras[i],
            )
        # KITTI-based Valeo dataset (fisheye)
        elif config.dataset[i] == 'KITTIValeoFisheye':
            from packnet_sfm.datasets.kitti_based_valeo_dataset_fisheye_singleView import \
                KITTIBasedValeoDatasetFisheye_singleView
            dataset = KITTIBasedValeoDatasetFisheye_singleView(
                config.path[i],
                path_split,
                **dataset_args,
                **dataset_args_i,
                cameras=config.cameras[i],
            )
        elif config.dataset[i] == 'KITTIValeoDistorted':
            from packnet_sfm.datasets.kitti_based_valeo_dataset_distorted_singleView import \
                KITTIBasedValeoDatasetDistorted_singleView
            dataset = KITTIBasedValeoDatasetDistorted_singleView(
                config.path[i],
                path_split,
                **dataset_args,
                **dataset_args_i,
                cameras=config.cameras[i],
            )
        elif config.dataset[i] == 'WoodscapeFisheye':
            from packnet_sfm.datasets.woodscape_fisheye import WoodscapeFisheye
            dataset = WoodscapeFisheye(
                config.path[i],
                path_split,
                **dataset_args,
                **dataset_args_i,
                cameras=config.cameras[i],
            )
        # Image-based Valeo dataset
        elif config.dataset[i] == 'ImageValeo':
            from packnet_sfm.datasets.image_based_valeo_dataset import ImageBasedValeoDataset
            dataset = ImageBasedValeoDataset(
                config.path[i],
                config.split[i],
                **dataset_args,
                **dataset_args_i,
                cameras=config.cameras[i],
            )
        else:
            raise ValueError('Unknown dataset %s' % config.dataset[i])

        # Repeat if needed
        if 'repeat' in config and config.repeat[i] > 1:
            dataset = ConcatDataset([dataset for _ in range(config.repeat[i])])
        datasets.append(dataset)

        # Display dataset information
        bar = '######### {:>7}'.format(len(dataset))
        if 'repeat' in config:
            bar += ' (x{})'.format(config.repeat[i])
        bar += ': {:<}'.format(path_split)
        print0(pcolor(bar, 'yellow'))

    # If training, concatenate all datasets into a single one
    if mode == 'train':
        datasets = [ConcatDataset(datasets)]

    return datasets
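
The `repeat` branch above simply stacks the same dataset several times; here is a tiny self-contained illustration (toy dataset, plain PyTorch) of what that does to length and indexing.

from torch.utils.data import ConcatDataset, Dataset

class ToyDataset(Dataset):
    """Stand-in dataset with five items: 0..4."""
    def __len__(self):
        return 5
    def __getitem__(self, idx):
        return idx

dataset = ToyDataset()
repeated = ConcatDataset([dataset for _ in range(3)])  # same pattern as the 'repeat' branch
print(len(repeated))  # 15 -> each epoch now visits every sample three times
print(repeated[7])    # 2  -> global index 7 maps to item 2 of the second copy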
Example #3
def setup_dataset(config, mode, requirements, **kwargs):
    """
    Create datasets for the given mode

    Parameters
    ----------
    config : CfgNode
        Configuration (cf. configs/default_config.py)
    mode : str {'train', 'validation', 'test'}
        Mode from which we want the dataset
    requirements : dict (string -> bool)
        Different requirements for dataset loading (gt_depth, gt_pose, etc)
    kwargs : dict
        Extra parameters for dataset creation

    Returns
    -------
    datasets : list of Dataset
        List of datasets for that mode (None if no dataset path is given)
    """
    # If no dataset is given, return None
    if len(config.path) == 0:
        return None

    print0(pcolor("###### Setup %s datasets" % mode, "red"))

    # Global shared dataset arguments
    dataset_args = {
        "back_context": config.back_context,
        "forward_context": config.forward_context,
        "data_transform": get_transforms(mode, **kwargs),
    }

    # Loop over all datasets
    datasets = []
    for i in range(len(config.split)):
        path_split = os.path.join(config.path[i], config.split[i])

        # Individual shared dataset arguments
        dataset_args_i = {
            "depth_type":
            config.depth_type[i] if requirements["gt_depth"] else None,
            "with_pose": requirements["gt_pose"],
        }

        # KITTI dataset
        if config.dataset[i] == "KITTI":
            from packnet_sfm.datasets.kitti_dataset import KITTIDataset

            dataset = KITTIDataset(
                config.path[i],
                path_split,
                **dataset_args,
                **dataset_args_i,
            )
        # DGP dataset
        elif config.dataset[i] == "DGP":
            from packnet_sfm.datasets.dgp_dataset import DGPDataset

            dataset = DGPDataset(
                config.path[i],
                config.split[i],
                **dataset_args,
                **dataset_args_i,
                cameras=config.cameras[i],
            )
        # Image dataset
        elif config.dataset[i] == "Image":
            from packnet_sfm.datasets.image_dataset import ImageDataset

            dataset = ImageDataset(
                config.path[i],
                config.split[i],
                **dataset_args,
                **dataset_args_i,
            )
        else:
            ValueError("Unknown dataset %d" % config.dataset[i])

        # Repeat if needed
        if "repeat" in config and config.repeat[i] > 1:
            dataset = ConcatDataset([dataset for _ in range(config.repeat[i])])
        datasets.append(dataset)

        # Display dataset information
        bar = "######### {:>7}".format(len(dataset))
        if "repeat" in config:
            bar += " (x{})".format(config.repeat[i])
        bar += ": {:<}".format(path_split)
        print0(pcolor(bar, "yellow"))

    # If training, concatenate all datasets into a single one
    if mode == "train":
        datasets = [ConcatDataset(datasets)]

    return datasets
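
Downstream, the list returned by setup_dataset is typically wrapped in PyTorch DataLoaders, one per dataset. The helper below is a generic, hedged sketch of that step, not packnet_sfm's own dataloader setup; its name and defaults are illustrative only.

from torch.utils.data import DataLoader

def make_dataloaders(datasets, batch_size=4, num_workers=2, shuffle=False):
    """Wrap each dataset returned by setup_dataset() in a DataLoader (sketch)."""
    if datasets is None:  # setup_dataset() returns None when config.path is empty
        return None
    return [
        DataLoader(dataset, batch_size=batch_size, shuffle=shuffle,
                   num_workers=num_workers, pin_memory=True)
        for dataset in datasets
    ]

# Hypothetical usage: one loader per dataset returned for the 'train' mode
# train_loaders = make_dataloaders(setup_dataset(config, 'train', requirements), shuffle=True)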