Example #1
def download_kitti_all(kitti_folder='kitti_download'):
    """ This python script downloads all KITTI folders and arranges them in a
    coherent data structure which can respectively be used by the other data
    scripts. It is recommended to keep the standard name kitti. Note that the
    path is determined automatically inside of file_io/get_path.py

    :param kitti_folder: Name of the folder in which the dataset should be downloaded
                    This is no path but just a name. the path is determined by
                    get_path.py
    """
    path_getter = gp.GetPath()
    dataset_folder_path = path_getter.get_data_path()
    assert os.path.isdir(dataset_folder_path), 'Path to dataset folder does not exist'

    kitti_path = os.path.join(dataset_folder_path, kitti_folder)
    kitti_raw_data = pd.read_csv('kitti_archives_to_download.txt',
                                 header=None, delimiter=' ')[0].values
    kitti_path_raw = os.path.join(kitti_path, 'Raw_data')
    if not os.path.isdir(kitti_path_raw):
        os.makedirs(kitti_path_raw)
    for url in kitti_raw_data:
        folder = os.path.split(url)[1]
        folder = os.path.join(kitti_path_raw, folder)
        folder = folder[:-4]
        wget.download(url, out=kitti_path_raw)
        unzipper = zipfile.ZipFile(folder + '.zip', 'r')
        unzipper.extractall(kitti_path_raw)
        unzipper.close()
        os.remove(folder + '.zip')
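
All of these examples resolve their base directories through gp.GetPath() / get_path.GetPath(), which is not shown on this page. Below is a minimal sketch of what such a helper could look like, assuming the data and checkpoint roots are taken from environment variables with a fallback; the actual file_io/get_path.py in the repository may resolve them differently (e.g. per hostname).

import os


class GetPath:
    """Hypothetical stand-in for file_io/get_path.py: a central place that maps
    the current machine to its dataset and checkpoint root directories."""

    def __init__(self):
        # Assumption: roots come from environment variables with a fallback;
        # the real implementation may determine them in another way.
        self._data_path = os.environ.get('DATASET_ROOT', '/data/datasets')
        self._checkpoint_path = os.environ.get('CHECKPOINT_ROOT', '/data/checkpoints')

    def get_data_path(self):
        # Root folder containing one sub-folder per dataset (e.g. 'kitti_download')
        return self._data_path

    def get_checkpoint_path(self):
        # Root folder for experiment logs and model checkpoints
        return self._checkpoint_path
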
Example #2
    def __init__(self, dataset):
        path_getter = gp.GetPath()
        dataset_folder = path_getter.get_data_path()
        path = os.path.join(dataset_folder, dataset, 'parameters.json')
        if not os.path.isdir(os.path.join(dataset_folder, dataset)):
            raise Exception(
                'There is no dataset folder called {}'.format(dataset))
        if not os.path.isfile(path):
            raise Exception(
                'There is no parameters.json file in the dataset folder. Please make sure '
                'to place the downloaded file into the dataset folder.')
        with open(path) as file:
            param_dict = json.load(file)
        self._dataset = dataset
        self._K = param_dict['K']
        if self._K is not None:
            self._K = np.array(self._K, dtype=np.float32)
        if param_dict['stereo_T'] is not None:
            self._stereo_T = np.eye(4, dtype=np.float32)
            self._stereo_T[0, 3] = param_dict['stereo_T']
        else:
            self._stereo_T = None
        self._depth_mode = param_dict['depth_mode']
        self._flow_mode = param_dict['flow_mode']
        self._splits = param_dict['splits']
        labels_name = param_dict['labels']
        if labels_name in lf.dataset_labels.keys():
            self.labels = lf.dataset_labels[labels_name].getlabels()
            self.labels_mode = param_dict['labels_mode']
        else:
            self.labels = None
            self.labels_mode = None
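
The constructor above expects a parameters.json next to the dataset. Based purely on the keys it reads (K, stereo_T, depth_mode, flow_mode, splits, labels, labels_mode), such a file could look roughly like the following. All values here are placeholders for illustration; the exact format of fields like splits, depth_mode or flow_mode depends on the repository, and labels must be a key of lf.dataset_labels.

{
    "K": [[0.58, 0.0, 0.5, 0.0],
          [0.0, 1.92, 0.5, 0.0],
          [0.0, 0.0, 1.0, 0.0],
          [0.0, 0.0, 0.0, 1.0]],
    "stereo_T": 0.1,
    "depth_mode": "uint_16",
    "flow_mode": null,
    "splits": ["eigen", "benchmark"],
    "labels": "kitti",
    "labels_mode": "fromid"
}
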
Example #3
def adjust_projectedvelodyne_folders(kitti_folder='kitti_download'):
    """ This function adjust the format of the sparse ground truth folder structure
    to the structure of the KITTI raw data and afterward removes the old directories.
    It is taken care that only the directories from the Download are worked on so that
    the procedure does not work on directories which it is not supposed to"""

    path_getter = gp.GetPath()
    dataset_folder_path = path_getter.get_data_path()
    gt_path = os.path.join(dataset_folder_path, kitti_folder)
    gt_path = os.path.join(gt_path, 'Depth_projected')
    assert os.path.isdir(gt_path), 'Path to data does not exist'
    folders = dl.DirLister.get_directories(gt_path)
    folders = dl.DirLister.include_dirs_by_name(folders, 'proj_depth')
    for f in folders:
        ground_path, camera = os.path.split(f)
        ground_path = os.path.split(ground_path)[0]
        ground_path = os.path.split(ground_path)[0]
        target_path = os.path.join(ground_path, camera, 'data')
        if not os.path.isdir(target_path):
            os.makedirs(target_path)
        else:
            continue
        for filepath in glob.glob(os.path.join(f, '*')):
            # Move each file to destination Directory
            shutil.move(filepath, target_path)
        print(target_path)

    for f in folders:
        remove_path = os.path.split(f)[0]
        remove_path = os.path.split(remove_path)[0]
        print(remove_path)
        shutil.rmtree(remove_path, ignore_errors=True)
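
Judging from the two os.path.split calls above, the function discards the two directory levels between the drive folder and the camera folder, so the ground truth ends up mirroring the raw data layout; the second loop then removes the emptied proj_depth trees. Roughly (folder names shortened, the middle level follows the KITTI depth archives):

    Depth_projected/<day>/<drive>/proj_depth/<gt type>/image_02/*.png    (as extracted)
    Depth_projected/<day>/<drive>/image_02/data/*.png                    (after the move)
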
Example #4
    def _init_log_dir(self, opt):
        path_getter = get_path.GetPath()
        log_base = path_getter.get_checkpoint_path()

        self.log_path = os.path.join(log_base, opt.experiment_class,
                                     opt.model_name)

        os.makedirs(self.log_path, exist_ok=True)
Example #5
    def __init__(self, experiment_class, model_name, device, model_type,
                 num_layers, num_layers_vgg, weights_init, learning_rate,
                 weight_decay, scheduler_step_size):

        if model_type == 'vgg':
            num_layers = num_layers_vgg

        self.device = device

        path_getter = get_path.GetPath()
        self.log_base = path_getter.get_checkpoint_path()
        self.log_path = os.path.join(self.log_base, experiment_class,
                                     model_name)

        self._init_training()
        self._init_model(model_type, num_layers, weights_init)
        self._init_optimizer(learning_rate, weight_decay, scheduler_step_size)
Example #6
    def __init__(self, experiment_class, model_name, device, split_pos,
                 num_layers, grad_scale_depth, grad_scale_seg, weights_init,
                 resolutions_depth, num_layers_pose, learning_rate,
                 weight_decay, scheduler_step_size):

        self.device = device

        path_getter = get_path.GetPath()
        self.log_base = path_getter.get_checkpoint_path()
        self.log_path = os.path.join(self.log_base, experiment_class,
                                     model_name)

        self._init_training()
        self._init_model(split_pos, num_layers, grad_scale_depth,
                         grad_scale_seg, weights_init, resolutions_depth,
                         num_layers_pose)
        self._init_optimizer(learning_rate, weight_decay, scheduler_step_size)
Example #7
def generate_depth_from_velo(kitti_folder='kitti_download'):
    """ This function generates the depth maps that correspond to the
    single point clouds of the raw LiDAR scans"""

    path_getter = gp.GetPath()
    dataset_folder_path = path_getter.get_data_path()
    gt_path = os.path.join(dataset_folder_path, kitti_folder)
    depth_path = os.path.join(gt_path, 'Depth')
    gt_path = os.path.join(gt_path, 'Raw_data')
    assert os.path.isdir(gt_path), 'Path to data does not exist'
    folders = dl.DirLister.get_directories(gt_path)
    folders = dl.DirLister.include_dirs_by_name(folders, 'velodyne_points')
    for f in folders:
        base_dir = os.path.split(f)[0]
        base_dir = os.path.split(base_dir)[0]
        calib_dir = os.path.split(base_dir)[0]
        image_dir_2 = os.path.join(base_dir, 'image_02', 'data')
        image_dir_3 = os.path.join(base_dir, 'image_03', 'data')
        day, drive = os.path.split(base_dir)
        day = os.path.split(day)[1]
        depth_dir_2 = os.path.join(depth_path, day, drive, 'image_02', 'data')
        depth_dir_3 = os.path.join(depth_path, day, drive, 'image_03', 'data')
        if not os.path.isdir(depth_dir_2):
            os.makedirs(depth_dir_2)
        if not os.path.isdir(depth_dir_3):
            os.makedirs(depth_dir_3)

        for file in glob.glob(os.path.join(f, '*')):
            filename = os.path.split(file)[1]
            filename_img = filename[:-3] + 'png'
            im_size_2 = cv2.imread(os.path.join(image_dir_2,
                                                filename_img)).shape[:2]
            im_size_3 = cv2.imread(os.path.join(image_dir_3,
                                                filename_img)).shape[:2]
            depth_2 = pcl_to_depth_map(calib_dir, file, im_size_2, 2)
            depth_3 = pcl_to_depth_map(calib_dir, file, im_size_3, 3)
            depth_2 = (depth_2 * 256).astype(np.uint16)
            depth_3 = (depth_3 * 256).astype(np.uint16)

            cv2.imwrite(os.path.join(depth_dir_2, filename_img), depth_2)
            cv2.imwrite(os.path.join(depth_dir_3, filename_img), depth_3)
        print(f)
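
The depth maps are stored in the usual KITTI way: metric depth multiplied by 256 and written as 16-bit PNGs, with 0 marking pixels without a LiDAR return. A small sketch of how such a file can be read back to metric depth (the helper name is just an example):

import cv2
import numpy as np


def read_kitti_depth_png(filename):
    """Load a 16-bit KITTI-style depth PNG and return metric depth plus a validity mask."""
    depth_png = cv2.imread(filename, cv2.IMREAD_UNCHANGED)  # keep the uint16 range
    assert depth_png is not None, 'could not read {}'.format(filename)
    assert depth_png.dtype == np.uint16, 'expected a 16-bit depth image'
    depth = depth_png.astype(np.float32) / 256.0  # invert the *256 encoding used above
    valid = depth_png > 0                         # 0 means "no LiDAR measurement"
    return depth, valid
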
Example #8
    def __init__(self, dataset, path=None):
        """Initializes the split creator class, mainly defines which dataset is supposed to be used.

        :param dataset: name of the dataset folder
        :param path: makes it possible to self-define a path (not recommended)
        """
        assert dataset in SUPPORTED_DATASETS, 'Dataset not supported'
        self.dataset = dataset
        if path:
            self.dataset_folder_path = os.path.join(path, dataset)
        else:
            path_getter = gp.GetPath()
            self.dataset_folder_path = os.path.join(
                path_getter.get_data_path(), dataset)
        assert os.path.isdir(
            self.dataset_folder_path), 'Path to dataset does not exist'
        self.output_path = None

        self.filename = 'basic_files' + '.json'
        self.json_data = None
        self.new_json_data = {}
Example #9
def create_parameter_files(datasets=None):
    if datasets is None:
        datasets = dataset_index.keys()
        parameters = dataset_index.values()
    else:
        if type(datasets) == str:
            datasets = [datasets]
        parameters = []
        for set in datasets:
            assert set in dataset_index.keys(), '{} is not a valid dataset'.format(set)
            parameters.append(dataset_index[set])
    path_getter = gp.GetPath()
    data_path = path_getter.get_data_path()
    for dataset, param in zip(datasets, parameters):
        dataset_path = os.path.join(data_path, dataset)
        if os.path.isdir(dataset_path):
            dump_location = os.path.join(dataset_path, 'parameters.json')
            with open(dump_location, 'w') as fp:
                json.dump(param, fp)
            print("{}: OK".format(dataset))
        else:
            print("{}: not found".format(dataset))
Example #10
    def __init__(self, num_classes, opt):
        super().__init__()
        self.encoder = Encoder(num_classes)
        self.decoder = Decoder(num_classes)
        if opt.weights_init == 'pretrained':
            path_getter = gp.GetPath()
            checkpoint_path = path_getter.get_checkpoint_path()
            encoder_path = os.path.join(checkpoint_path, 'erfnet',
                                        'official_pretrained',
                                        'erfnet_encoder_pretrained.pth.tar')
            cur_state_dict = self.encoder.state_dict()
            if opt.no_cuda:
                load_state_dict = torch.load(encoder_path, map_location='cpu')
            else:
                load_state_dict = torch.load(encoder_path)
            # Copy the pretrained encoder weights in order; the checkpoint keys may
            # carry a prefix, so each local key is matched as a substring of the
            # corresponding checkpoint key.
            counter = 0
            load_keys = list(load_state_dict['state_dict'].keys())
            for key in list(cur_state_dict.keys()):
                if key in load_keys[counter]:
                    cur_state_dict[key] = load_state_dict['state_dict'][
                        load_keys[counter]]
                    counter += 1
            self.encoder.load_state_dict(cur_state_dict)
Example #11
        return dir_list

    @staticmethod
    def get_files_by_ending(cur_dir, file_ending, ignore=[]):
        """ returns all files inside a directory which have a certain ending

        :param cur_dir: list of directories
        :param file_ending: all files with the specified file_ending are returned
        :param ignore: list of strings. Filenames containing one of these strings will be ignored.
        :return: all files inside cur_dir which have the ending file_ending
        """
        if DirLister.check_formats(cur_dir=cur_dir,
                                   file_ending=file_ending) is False:
            sys.exit("Inputparameter überprüfen")
        files = DirLister.list_files_in_directory(cur_dir)
        len_ending = len(file_ending)
        files = [x for x in files if x[-len_ending:] == file_ending]
        for ignore_string in ignore:
            files = [x for x in files if ignore_string not in x]
        return files


if __name__ == '__main__':
    """can be used for testing purposes"""
    path_getter = gp.GetPath()
    path = path_getter.get_data_path()
    path = os.path.join(path, 'Cityscapes')
    a = DirLister()
    test = a.get_directories(path)
    print(a.include_dirs_by_name(test, 'test'))
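
In the same spirit as the __main__ block above, get_files_by_ending can be combined with get_directories to collect, for example, all PNG files while skipping anything with 'disparity' in its name; the dataset folder and the filter string here are only illustrative.

    path_getter = gp.GetPath()
    data_path = os.path.join(path_getter.get_data_path(), 'Cityscapes')
    dirs = DirLister.get_directories(data_path)
    pngs = DirLister.get_files_by_ending(dirs, '.png', ignore=['disparity'])
    print(len(pngs))
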
Example #12
    def _gen_dataset_path(self, dataset):
        path_getter = gp.GetPath()
        dataset_folder = path_getter.get_data_path()

        return os.path.join(dataset_folder, dataset)
Example #13
    def __init__(self,
                 dataset,
                 trainvaltest_split,
                 video_mode='mono',
                 stereo_mode='mono',
                 cluster_mode=None,
                 simple_mode=False,
                 labels=None,
                 labels_mode=None,
                 data_transforms=None,
                 scales=None,
                 keys_to_load=None,
                 keys_to_video=None,
                 keys_to_stereo=None,
                 split=None,
                 video_frames=None,
                 disable_const_items=True,
                 folders_to_load=None,
                 files_to_load=None,
                 n_files=None,
                 output_filenames=False,
                 flow_validation_mode=True):
        """Initialises the dataset by loading the desired data from the json file

        :param dataset: name of the dataset
        :param trainvaltest_split: can be train, validation or test
        :param video_mode: can be mono or video and defines if only the images or image sequences are to be loaded
        :param stereo_mode: can be mono or stereo and defines if the stereo images are to be loaded
        :param simple_mode: if True, the Data is read directly from a folder without using a .json file
        :param labels: gives the labels as defined in the named tuples style in Cityscapes. Get the labels from
            definitions folder
        :param labels_mode: can be fromid or fromrgb and defines if the segmentation masks are given as id or color
        :param data_transforms: takes the transforms.compose list
        :param scales: list of all scales at which the images should be loaded (list of exponents for powers of 2)
        :param keys_to_load: defines all keys which should be loaded
        :param keys_to_video: defines for which keys the sequences are to be loaded
        :param keys_to_stereo: defines for which keys the stereo images are supposed to be loaded
        :param split: dataset split that is supposed to be loaded. default is the complete dataset itself
        :param video_frames: all frames of the sequence that are supposed to be loaded (list of frame numbers relative
            to the main frame, e.g. [0, -2, -1, 1, 2])
        :param disable_const_items: removes the constant items like camera calibration from loading procedure
        :param folders_to_load: list of folders from which data should be loaded; folders not mentioned are skipped in
            the respective set. Only the last folder in a path is considered; filter is case insensitive.
            Default: None -> all folders are loaded from dataset
        :param files_to_load: list of files that should be loaded; files not mentioned are skipped in the respective
            set. File names need not be complete; filter is case insensitive.
            Default: None -> all files are loaded from dataset
        :param n_files: Number of files to load. If there are more files than n_files, the files are selected
            randomly, seeded by numpy.random.seed()
        """
        super(BaseDataset, self).__init__()
        assert isinstance(dataset, str)
        assert trainvaltest_split in (
            'train', 'validation',
            'test'), '''trainvaltest_split must be train,
        validation or test'''
        assert video_mode in ('mono',
                              'video'), 'video_mode must be mono or video'
        assert stereo_mode in ('mono',
                               'stereo'), 'stereo_mode must be mono or stereo'
        assert isinstance(simple_mode, bool)
        if data_transforms is None:
            data_transforms = [
                mytransforms.CreateScaledImage(),
                mytransforms.CreateColoraug(),
                mytransforms.ToTensor()
            ]
        if scales is None:
            scales = [0]
        if keys_to_load is None:
            keys_to_load = ['color']
        if keys_to_stereo is None and stereo_mode == 'stereo':
            keys_to_stereo = ['color']
        if keys_to_video is None and video_mode == 'video':
            keys_to_video = ['color']
        if video_frames is None:
            video_frames = [0, -1, 1]

        self.dataset = dataset
        self.video_mode = video_mode
        self.stereo_mode = stereo_mode
        self.scales = scales
        self.disable_const_items = disable_const_items
        self.output_filenames = output_filenames
        self.parameters = dps.DatasetParameterset(dataset)
        if labels is not None:
            self.parameters.labels = labels
        if labels_mode is not None:
            self.parameters.labels_mode = labels_mode
        path_getter = gp.GetPath()
        dataset_folder = path_getter.get_data_path()
        datasetpath = os.path.join(dataset_folder, self.dataset)
        self.datasetpath = datasetpath
        if split is None:
            splitpath = None
        else:
            splitpath = os.path.join(dataset_folder,
                                     self.dataset + '_' + split)

        if simple_mode is False:
            self.data = self.read_json_file(datasetpath, splitpath,
                                            trainvaltest_split, keys_to_load,
                                            keys_to_stereo, keys_to_video,
                                            video_frames, folders_to_load,
                                            files_to_load, n_files)
        else:
            self.data = self.read_from_folder(datasetpath, keys_to_load,
                                              video_mode, video_frames)

        self.load_transforms = transforms.Compose([
            mytransforms.LoadRGB(),
            mytransforms.LoadSegmentation(),
            mytransforms.LoadDepth(),
            mytransforms.LoadFlow(validation_mode=flow_validation_mode),
            mytransforms.LoadNumerics()
        ])

        # IMPORTANT to create a new list if the same list is passed to multiple datasets. Otherwise, due to the
        # mutability of lists, ConvertSegmentation will only be added once. Hence, the labels may be wrong for the 2nd,
        # 3rd, ... dataset!
        self.data_transforms = list(data_transforms)

        # Error if CreateColorAug and CreateScaledImage not in transforms.
        if mytransforms.CreateScaledImage not in data_transforms:
            raise Exception(
                'The transform CreateScaledImage() has to be part of the data_transforms list'
            )
        if mytransforms.CreateColoraug not in data_transforms:
            raise Exception(
                'The transform CreateColoraug() has to be part of the data_transforms list'
            )

        # Error if depth, segmentation or flow keys are given but not the corresponding Convert-Transform
        if any([key.startswith('segmentation') for key in keys_to_load]) and \
                mytransforms.ConvertSegmentation not in self.data_transforms:
            raise Exception(
                'When loading segmentation images, please add mytransforms.ConvertSegmentation() to '
                'the data_transforms')
        if any([key.startswith('depth') for key in keys_to_load]) and \
                mytransforms.ConvertDepth not in self.data_transforms:
            raise Exception(
                'When loading depth images, please add mytransforms.ConvertDepth() to the data_transforms'
            )
        if any([key.startswith('flow') for key in keys_to_load]) and \
                mytransforms.ConvertFlow not in self.data_transforms:
            raise Exception(
                'When loading flow images, please add mytransforms.ConvertFlow() to the data_transforms'
            )

        # In the flow validation mode, it is not allowed to use data-altering transforms
        if any([key.startswith('flow')
                for key in keys_to_load]) and flow_validation_mode:
            allowed_transforms = [
                mytransforms.CreateScaledImage, mytransforms.CreateColoraug,
                mytransforms.ConvertSegmentation, mytransforms.ConvertDepth,
                mytransforms.ConvertFlow, mytransforms.RemoveOriginals,
                mytransforms.ToTensor, mytransforms.Relabel,
                mytransforms.OneHotEncoding, mytransforms.NormalizeZeroMean,
                mytransforms.AdjustKeys, mytransforms.RemapKeys,
                mytransforms.AddKeyValue
            ]
            for transform in self.data_transforms:
                if transform not in allowed_transforms:
                    raise Exception(
                        'In flow validation mode, it is not allowed to use data-altering transforms'
                    )

        # Set the correct parameters to the ConvertDepth and ConvertSegmentation transforms
        for i, transform in zip(range(len(self.data_transforms)),
                                self.data_transforms):
            if isinstance(transform, mytransforms.ConvertDepth):
                transform.set_mode(self.parameters.depth_mode)
            elif isinstance(transform, mytransforms.ConvertSegmentation):
                transform.set_mode(self.parameters.labels,
                                   self.parameters.labels_mode)
            elif isinstance(transform, mytransforms.ConvertFlow):
                transform.set_mode(self.parameters.flow_mode,
                                   flow_validation_mode)

        self.data_transforms = transforms.Compose(self.data_transforms)
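
To tie the constructor arguments together, here is a rough usage sketch: a training split loaded with color and depth keys, wrapped in a PyTorch DataLoader. The dataset name, split name, keys and batch settings are placeholders, and mytransforms/BaseDataset are assumed to be imported as in the snippet above; the transform list mirrors the defaults plus the ConvertDepth transform that the constructor requires when depth is loaded.

    from torch.utils.data import DataLoader

    data_transforms = [
        mytransforms.CreateScaledImage(),   # required, see the check above
        mytransforms.CreateColoraug(),      # required, see the check above
        mytransforms.ConvertDepth(),        # needed because 'depth' is in keys_to_load
        mytransforms.ToTensor(),
    ]

    dataset = BaseDataset(dataset='kitti',
                          trainvaltest_split='train',
                          keys_to_load=['color', 'depth'],
                          data_transforms=data_transforms,
                          split='eigen')    # placeholder split name

    loader = DataLoader(dataset, batch_size=4, shuffle=True, num_workers=2)
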
Example #14
def download_kitti_all(kitti_folder='kitti_download'):
    """ This pathon-script downloads all KITTI folders and aranges them in a
    coherent data structure which can respectively be used by the other data
    scripts. It is recommended to keep the standard name KITTI. Note that the
    path is determined automatically inside of file_io/get_path.py

    parameters:
    - kitti_folder: Name of the folder in which the dataset should be downloaded
                    This is no path but just a name. the path ios determined by
                    get_path.py

    """

    # Download the standard KITTI Raw data

    path_getter = gp.GetPath()
    dataset_folder_path = path_getter.get_data_path()
    assert os.path.isdir(
        dataset_folder_path), 'Path to dataset folder does not exist'

    kitti_path = os.path.join(dataset_folder_path, kitti_folder)
    kitti_raw_data = pd.read_csv('kitti_archives_to_download.txt',
                                 header=None,
                                 delimiter=' ')[0].values
    kitti_path_raw = os.path.join(kitti_path, 'Raw_data')
    if not os.path.isdir(kitti_path_raw):
        os.makedirs(kitti_path_raw)
    for url in kitti_raw_data:
        folder = os.path.split(url)[1]
        folder = os.path.join(kitti_path_raw, folder)
        folder = folder[:-4]
        wget.download(url, out=kitti_path_raw)
        unzipper = zipfile.ZipFile(folder + '.zip', 'r')
        unzipper.extractall(kitti_path_raw)
        unzipper.close()
        os.remove(folder + '.zip')

    kitti_dirs_days = os.listdir(kitti_path_raw)

    # Get ground truth depths

    kitti_path_depth_annotated = os.path.join(kitti_path, 'Depth_improved')
    if not os.path.isdir(kitti_path_depth_annotated):
        os.makedirs(kitti_path_depth_annotated)
    url_depth_annotated = 'https://s3.eu-central-1.amazonaws.com/avg-kitti/data_depth_annotated.zip'
    wget.download(url_depth_annotated, out=kitti_path_depth_annotated)
    depth_zipped = os.path.join(kitti_path_depth_annotated,
                                os.path.split(url_depth_annotated)[1])
    unzipper = zipfile.ZipFile(depth_zipped, 'r')
    unzipper.extractall(kitti_path_depth_annotated)
    unzipper.close()
    os.remove(depth_zipped)

    trainval_folder = os.listdir(kitti_path_depth_annotated)
    kitti_drives_list = []
    for sub_folder in trainval_folder:
        sub_folder = os.path.join(kitti_path_depth_annotated, sub_folder)
        kitti_drives_list.extend(
            [os.path.join(sub_folder, i) for i in os.listdir(sub_folder)])

    for sub_folder in kitti_dirs_days:
        sub_folder = os.path.join(kitti_path_depth_annotated, sub_folder)
        if not os.path.isdir(sub_folder):
            os.makedirs(sub_folder)
        for drive in kitti_drives_list:
            if os.path.split(sub_folder)[1] in drive:
                shutil.move(drive, sub_folder)

    for sub_folder in trainval_folder:
        sub_folder = os.path.join(kitti_path_depth_annotated, sub_folder)
        shutil.rmtree(sub_folder)

    # Get sparse depths

    kitti_path_depth_sparse = os.path.join(kitti_path, 'Depth_projected')
    if not os.path.isdir(kitti_path_depth_sparse):
        os.makedirs(kitti_path_depth_sparse)
    url_depth_sparse = 'https://s3.eu-central-1.amazonaws.com/avg-kitti/data_depth_velodyne.zip'
    wget.download(url_depth_sparse, out=kitti_path_depth_sparse)
    depth_zipped = os.path.join(kitti_path_depth_sparse,
                                os.path.split(url_depth_sparse)[1])
    unzipper = zipfile.ZipFile(depth_zipped, 'r')
    unzipper.extractall(kitti_path_depth_sparse)
    unzipper.close()
    os.remove(depth_zipped)

    trainval_folder = os.listdir(kitti_path_depth_sparse)
    kitti_drives_list = []
    for sub_folder in trainval_folder:
        sub_folder = os.path.join(kitti_path_depth_sparse, sub_folder)
        kitti_drives_list.extend(
            [os.path.join(sub_folder, i) for i in os.listdir(sub_folder)])

    for sub_folder in kitti_dirs_days:
        sub_folder = os.path.join(kitti_path_depth_sparse, sub_folder)
        if not os.path.isdir(sub_folder):
            os.makedirs(sub_folder)
        for drive in kitti_drives_list:
            if os.path.split(sub_folder)[1] in drive:
                shutil.move(drive, sub_folder)

    for sub_folder in trainval_folder:
        sub_folder = os.path.join(kitti_path_depth_sparse, sub_folder)
        shutil.rmtree(sub_folder)

    # download test_files and integrate them into the folder structure

    url_depth_testset = 'https://s3.eu-central-1.amazonaws.com/avg-kitti/data_depth_selection.zip'
    wget.download(url_depth_testset, out=kitti_path)
    depth_zipped = os.path.join(kitti_path,
                                os.path.split(url_depth_testset)[1])
    unzipper = zipfile.ZipFile(depth_zipped, 'r')
    unzipper.extractall(kitti_path)
    unzipper.close()
    os.remove(depth_zipped)

    init_depth_completion_folder = os.path.join(
        kitti_path, 'depth_selection', 'test_depth_completion_anonymous',
        'image')
    target_depth_completion_folder = os.path.join(kitti_path_raw,
                                                  'test_depth_completion',
                                                  'image_02')
    if not os.path.isdir(target_depth_completion_folder):
        os.makedirs(target_depth_completion_folder)
    shutil.move(init_depth_completion_folder, target_depth_completion_folder)
    os.rename(
        os.path.join(target_depth_completion_folder,
                     os.path.split(init_depth_completion_folder)[1]),
        os.path.join(target_depth_completion_folder, 'data'))

    init_depth_completion_folder = os.path.join(
        kitti_path, 'depth_selection', 'test_depth_completion_anonymous',
        'intrinsics')
    target_depth_completion_folder = os.path.join(kitti_path_raw,
                                                  'test_depth_completion')
    shutil.move(init_depth_completion_folder, target_depth_completion_folder)

    init_depth_completion_folder = os.path.join(
        kitti_path, 'depth_selection', 'test_depth_completion_anonymous',
        'velodyne_raw')
    target_depth_completion_folder = os.path.join(kitti_path_depth_sparse,
                                                  'test_depth_completion',
                                                  'image_02')
    if not os.path.isdir(target_depth_completion_folder):
        os.makedirs(target_depth_completion_folder)
    shutil.move(init_depth_completion_folder, target_depth_completion_folder)
    os.rename(
        os.path.join(target_depth_completion_folder,
                     os.path.split(init_depth_completion_folder)[1]),
        os.path.join(target_depth_completion_folder, 'data'))

    init_depth_prediction_folder = os.path.join(
        kitti_path, 'depth_selection', 'test_depth_prediction_anonymous',
        'image')
    target_depth_prediction_folder = os.path.join(kitti_path_raw,
                                                  'test_depth_prediction',
                                                  'image_02')
    if not os.path.isdir(target_depth_prediction_folder):
        os.makedirs(target_depth_prediction_folder)
    shutil.move(init_depth_prediction_folder, target_depth_prediction_folder)
    os.rename(
        os.path.join(target_depth_prediction_folder,
                     os.path.split(init_depth_prediction_folder)[1]),
        os.path.join(target_depth_prediction_folder, 'data'))

    init_depth_prediction_folder = os.path.join(
        kitti_path, 'depth_selection', 'test_depth_prediction_anonymous',
        'intrinsics')
    target_depth_prediction_folder = os.path.join(kitti_path_raw,
                                                  'test_depth_prediction')
    shutil.move(init_depth_prediction_folder, target_depth_prediction_folder)

    shutil.rmtree(os.path.join(kitti_path, 'depth_selection'))