def read_png_depth(file):
    """Reads a .png depth map.

    Depth is stored as 16-bit PNG values scaled by 256; pixels with
    value 0 encode missing depth.

    Parameters
    ----------
    file : str
        Path to the .png depth file.

    Returns
    -------
    np.ndarray
        (H, W, 1) float depth map in meters-like units (raw / 256.),
        with invalid (zero) pixels set to -1.
    """
    depth_png = np.array(load_convert_image(file), dtype=int)
    # A genuine 16-bit depth PNG must contain values above the 8-bit range;
    # otherwise the file is likely a plain RGB/grayscale image.
    assert (np.max(depth_png) > 255), 'Wrong .png depth file'
    # `np.float` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin `float` (== float64) is the direct replacement.
    depth = depth_png.astype(float) / 256.
    depth[depth_png == 0] = -1.  # mark missing depth
    return np.expand_dims(depth, axis=2)
    def __getitem__(self, idx):
        """Get dataset sample given an index.

        Builds a dict with the target RGB frame, its camera model
        (camera type plus fisheye-polynomial and pinhole/distortion
        intrinsics), the ego-mask path, and — depending on the dataset
        flags — the extrinsic pose matrix, depth, temporal context
        frames, geometric (multi-camera) context frames padded to
        ``self.max_geometric_context``, and spatio-temporal context
        frames. ``self.data_transform`` is applied last if set.
        """
        # Add image information
        sample = {
            'idx': idx,
            'filename': '%s_%010d' % (self.split, idx),
            'rgb': load_convert_image(self.paths[idx]),
        }

        # Add intrinsics
        #parent_folder = self._get_parent_folder(self.paths[idx])
        base_folder_str = self._get_base_folder(self.paths[idx])
        split_type_str = self._get_split_type(self.paths[idx])
        seq_name_str = self._get_sequence_name(self.paths[idx])
        camera_str = self._get_camera_name(self.paths[idx])
        # Calibration is memoized per (base folder, split, sequence, camera)
        # so the raw calibration files are parsed at most once per camera.
        calib_identifier = base_folder_str + split_type_str + seq_name_str + camera_str
        #current_folder = self._get_current_folder(self.paths[idx])
        if calib_identifier in self.calibration_cache:
            c_data = self.calibration_cache[calib_identifier]
        else:
            c_data = self._read_raw_calib_files(base_folder_str, split_type_str, seq_name_str, [camera_str])
            self.calibration_cache[calib_identifier] = c_data

        camera_type = self._get_camera_type(self.paths[idx], c_data)
        camera_type_int = self._get_camera_type_int(camera_type)
        # Full camera model: fisheye polynomial part (poly_coeffs,
        # principal_point, scale_factors) plus pinhole part (K, k, p).
        poly_coeffs, principal_point, scale_factors, K, k, p = self._get_full_intrinsics(self.paths[idx], c_data)

        sample.update({
            'camera_type': camera_type_int,
            'intrinsics_poly_coeffs': poly_coeffs,
            'intrinsics_principal_point': principal_point,
            'intrinsics_scale_factors': scale_factors,
            'intrinsics_K': K,
            'intrinsics_k': k,
            'intrinsics_p': p,
            'path_to_ego_mask': self._get_path_to_ego_mask(self.paths[idx]),
        })

        # sample.update({
        #     'path_to_theta_lut': self._get_path_to_theta_lut(self.paths[idx]),
        # })

        # Absolute extrinsics of the target camera (needed below to express
        # geometric-context poses relative to this frame).
        if self.with_geometric_context:
            sample.update({
                'pose_matrix': self._get_extrinsics_pose_matrix(self.paths[idx], c_data),
            })

        # Flag consumed downstream; only set when both context types are on.
        if self.with_geometric_context and self.with_spatiotemp_context:
            sample.update({
                'with_spatiotemp_context': 1,
            })
        # # Add pose information if requested
        # if self.with_pose:
        #     sample.update({
        #         'pose': self._get_pose(self.paths[idx]),
        #     })

        # Add depth information if requested
        if self.with_depth:
            sample.update({
                'depth': self._read_depth(self._get_depth_file(self.paths[idx])),
            })

        # Add context information if requested
        if self.with_context:
            # Add context images

            # 1. TEMPORAL CONTEXT
            # Previous / next frames of the same camera.
            image_context_paths_backward, _ = \
                self._get_context_files(self.paths[idx], self.backward_context_paths[idx])
            image_context_paths_forward, _ = \
                self._get_context_files(self.paths[idx], self.forward_context_paths[idx])

            image_temporal_context_paths = image_context_paths_backward + image_context_paths_forward
            n_temporal_context = len(image_temporal_context_paths)
            image_temporal_context = [load_convert_image(f) for f in image_temporal_context_paths]

            sample.update({
                'rgb_temporal_context': image_temporal_context,
            })

            # 2. GEOMETRIC CONTEXT
            # Frames from other cameras at the same timestep, each with its
            # own calibration (cached under the same identifier scheme as
            # the target camera above).
            if self.with_geometric_context:
                image_context_paths_geometric_context = self.paths_geometric_context[idx]
                n_geometric_context = len(image_context_paths_geometric_context)
                base_folder_str_geometric_context = [
                    self._get_base_folder(context_path) for context_path in image_context_paths_geometric_context
                ]
                split_type_str_geometric_context = [
                    self._get_split_type(context_path) for context_path in image_context_paths_geometric_context
                ]
                seq_name_str_geometric_context = [
                    self._get_sequence_name(context_path) for context_path in image_context_paths_geometric_context
                ]
                camera_str_geometric_context = [
                    self._get_camera_name(context_path) for context_path in image_context_paths_geometric_context
                ]
                calib_identifier_geometric_context = [
                    base_folder_str + split_type_str + seq_name_str + camera_str
                    for base_folder_str, split_type_str, seq_name_str, camera_str
                    in zip(base_folder_str_geometric_context,
                           split_type_str_geometric_context,
                           seq_name_str_geometric_context,
                           camera_str_geometric_context)
                ]
                c_data_geometric_context = []
                for i_context in range(n_geometric_context):
                    if calib_identifier_geometric_context[i_context] in self.calibration_cache:
                        c_data_geometric_context.append(
                            self.calibration_cache[calib_identifier_geometric_context[i_context]]
                        )
                    else:
                        c_data_tmp = self._read_raw_calib_files(base_folder_str_geometric_context[i_context],
                                                                split_type_str_geometric_context[i_context],
                                                                seq_name_str_geometric_context[i_context],
                                                                [camera_str_geometric_context[i_context]])
                        c_data_geometric_context.append(c_data_tmp)
                        self.calibration_cache[calib_identifier_geometric_context[i_context]] = c_data_tmp
                camera_type_geometric_context = [
                    self._get_camera_type(image_context_paths_geometric_context[i_context],
                                          c_data_geometric_context[i_context])
                    for i_context in range(n_geometric_context)
                ]
                camera_type_geometric_context_int = [
                    self._get_camera_type_int(camera_type_geometric_context[i_context])
                    for i_context in range(n_geometric_context)
                ]
                # Gather the full camera model of every geometric-context camera.
                poly_coeffs_geometric_context = []
                principal_point_geometric_context = []
                scale_factors_geometric_context = []
                K_geometric_context = []
                k_geometric_context = []
                p_geometric_context = []
                for i_context in range(n_geometric_context):
                    poly_coeffs_tmp, principal_point_tmp, scale_factors_tmp, K_tmp, k_tmp, p_tmp = \
                        self._get_full_intrinsics(image_context_paths_geometric_context[i_context],
                                                  c_data_geometric_context[i_context])
                    poly_coeffs_geometric_context.append(poly_coeffs_tmp)
                    principal_point_geometric_context.append(principal_point_tmp)
                    scale_factors_geometric_context.append(scale_factors_tmp)
                    K_geometric_context.append(K_tmp)
                    k_geometric_context.append(k_tmp)
                    p_geometric_context.append(p_tmp)
                path_to_ego_mask_geometric_context = [
                    self._get_path_to_ego_mask(context_path)
                    for context_path in image_context_paths_geometric_context
                ]
                absolute_pose_matrix_geometric_context = [
                    self._get_extrinsics_pose_matrix(image_context_paths_geometric_context[i_context],
                                                     c_data_geometric_context[i_context])
                    for i_context in range(n_geometric_context)
                ]
                # Context pose expressed relative to the target frame:
                # absolute_context_pose composed with the inverse target pose.
                relative_pose_matrix_geometric_context = [
                    (absolute_context_pose @ invert_pose_numpy(sample['pose_matrix'])).astype(np.float32)
                    for absolute_context_pose in absolute_pose_matrix_geometric_context
                ]

                image_geometric_context = [load_convert_image(f) for f in image_context_paths_geometric_context]

                # must fill with dummy values
                # Pad every per-context list up to self.max_geometric_context so
                # all samples have the same length (required for batching).
                # NOTE(review): the 1280x800 dummy size presumably matches the
                # real image resolution — confirm against the dataset.
                for i_context in range(n_geometric_context, self.max_geometric_context):
                    image_geometric_context.append(Image.new('RGB', (1280, 800)))
                    camera_type_geometric_context_int.append(2)  # 2 = dummy/invalid camera type
                    K_tmp, k_tmp, p_tmp = self._get_null_intrinsics_distorted()
                    poly_coeffs_tmp, principal_point_tmp, scale_factors_tmp = self._get_null_intrinsics_fisheye()
                    poly_coeffs_geometric_context.append(poly_coeffs_tmp)
                    principal_point_geometric_context.append(principal_point_tmp)
                    scale_factors_geometric_context.append(scale_factors_tmp)
                    K_geometric_context.append(K_tmp)
                    k_geometric_context.append(k_tmp)
                    p_geometric_context.append(p_tmp)
                    path_to_ego_mask_geometric_context.append('')
                    relative_pose_matrix_geometric_context.append(np.eye(4).astype(np.float32))
                    absolute_pose_matrix_geometric_context.append(np.eye(4).astype(np.float32))

                camera_type_geometric_context_int = np.array(camera_type_geometric_context_int)

                sample.update({
                    'rgb_geometric_context': image_geometric_context,
                    'camera_type_geometric_context': camera_type_geometric_context_int,
                    'intrinsics_poly_coeffs_geometric_context': poly_coeffs_geometric_context,
                    'intrinsics_principal_point_geometric_context': principal_point_geometric_context,
                    'intrinsics_scale_factors_geometric_context': scale_factors_geometric_context,
                    'intrinsics_K_geometric_context': K_geometric_context,
                    'intrinsics_k_geometric_context': k_geometric_context,
                    'intrinsics_p_geometric_context': p_geometric_context,
                    'path_to_ego_mask_geometric_context': path_to_ego_mask_geometric_context,
                    'pose_matrix_geometric_context': relative_pose_matrix_geometric_context,
                    'pose_matrix_geometric_context_absolute': absolute_pose_matrix_geometric_context,
                })
            else:
                # Keep the keys present (empty) so downstream code can rely
                # on a fixed sample schema.
                sample.update({
                    'rgb_geometric_context': [],
                    'camera_type_geometric_context': [],
                    'intrinsics_poly_coeffs_geometric_context': [],
                    'intrinsics_principal_point_geometric_context': [],
                    'intrinsics_scale_factors_geometric_context': [],
                    'intrinsics_K_geometric_context': [],
                    'intrinsics_k_geometric_context': [],
                    'intrinsics_p_geometric_context': [],
                    'path_to_ego_mask_geometric_context': [],
                    'pose_matrix_geometric_context': [],
                    'pose_matrix_geometric_context_absolute': [],

                    'rgb_geometric_context_temporal_context': [],
                })

            # 3. GEOMETRIC-TEMPORAL CONTEXT
            # Temporal neighbours of each geometric-context camera.
            if self.with_geometric_context and self.with_spatiotemp_context:
                # Backward
                image_context_paths_geometric_context_backward_nested = \
                    self.backward_context_paths_geometric_context[idx]
                # Forward
                image_context_paths_geometric_context_forward_nested = \
                    self.forward_context_paths_geometric_context[idx]
                image_geometric_context_temporal_context_paths_nested = [
                    b + f for b, f in zip(image_context_paths_geometric_context_backward_nested,
                                          image_context_paths_geometric_context_forward_nested)
                ]
                # Flatten the per-camera nested lists into one path list.
                image_geometric_context_temporal_context_paths = [
                    item for sublist in image_geometric_context_temporal_context_paths_nested for item in sublist
                ]
                n_spatiotemp_context = len(image_geometric_context_temporal_context_paths)
                image_geometric_context_temporal_context = [
                    load_convert_image(f) for f in image_geometric_context_temporal_context_paths
                ]
                # must fill with dummy values
                # NOTE(review): assumes each missing geometric-context camera
                # would contribute exactly n_temporal_context frames — confirm.
                for i_context in range(n_geometric_context, self.max_geometric_context):
                    for j in range(n_temporal_context):
                        image_geometric_context_temporal_context.append(Image.new('RGB', (1280, 800)))

                sample.update({
                    'rgb_geometric_context_temporal_context': image_geometric_context_temporal_context,
                })
            else:
                sample.update({
                    'rgb_geometric_context_temporal_context':  [],
                })

        # Apply transformations
        if self.data_transform:
            sample = self.data_transform(sample)

        # Return sample
        return sample
    def __getitem__(self, idx):
        """Get dataset sample given an index.

        The sample dict holds the RGB frame, the distorted-pinhole
        intrinsics (K, k, p), the ego-mask path, and — depending on the
        dataset flags — pose, depth and temporal-context entries.
        ``self.data_transform`` is applied last if set.
        """
        path = self.paths[idx]

        # Target frame information.
        sample = {
            'idx': idx,
            'filename': '%s_%010d' % (self.split, idx),
            'rgb': load_convert_image(path),
        }

        # Calibration data, memoized per (base, split, sequence, camera).
        base_folder_str = self._get_base_folder(path)
        split_type_str = self._get_split_type(path)
        seq_name_str = self._get_sequence_name(path)
        camera_str = self._get_camera_name(path)
        calib_identifier = base_folder_str + split_type_str + seq_name_str + camera_str
        if calib_identifier in self.calibration_cache:
            c_data = self.calibration_cache[calib_identifier]
        else:
            c_data = self._read_raw_calib_files(base_folder_str,
                                                split_type_str, seq_name_str,
                                                [camera_str])
            self.calibration_cache[calib_identifier] = c_data

        # Intrinsics of the distorted pinhole model.
        K, k, p = self._get_intrinsics(path, c_data)
        sample['intrinsics_K'] = K
        sample['intrinsics_k'] = k
        sample['intrinsics_p'] = p
        sample['path_to_ego_mask'] = self._get_path_to_ego_mask(path)

        # Pose of the target frame, if requested.
        if self.with_pose:
            sample['pose'] = self._get_pose(path)

        # Depth map, if requested.
        if self.with_depth:
            sample['depth'] = self._read_depth(self._get_depth_file(path))

        # Temporal context (backward + forward neighbours), if requested.
        if self.with_context:
            context_idxs = self.backward_context_paths[idx] + \
                           self.forward_context_paths[idx]
            context_paths, _ = self._get_context_files(path, context_idxs)
            sample['rgb_context'] = [
                load_convert_image(f) for f in context_paths
            ]
            # Every context frame shares the target frame's ego mask.
            sample['path_to_ego_mask_context'] = \
                [sample['path_to_ego_mask']] * len(context_paths)
            if self.with_pose:
                # Context poses expressed relative to the target frame.
                target_pose = sample['pose']
                sample['pose_context'] = [
                    invert_pose_numpy(self._get_pose(f)) @ target_pose
                    for f in context_paths
                ]

        # Apply transformations.
        if self.data_transform:
            sample = self.data_transform(sample)

        return sample
    def __getitem__(self, idx):
        """Get dataset sample given an index.

        Builds a dict with the target RGB frame, fisheye intrinsics
        (polynomial coefficients, principal point, scale factors), theta-LUT
        and ego-mask paths, the extrinsic pose matrix, and — depending on
        the dataset flags — pose, depth, CoordConv-style camera features,
        temporal context frames and (optionally) same-timestep stereo
        left/right frames as geometric context, each with its own
        calibration and relative pose. ``self.data_transform`` is applied
        last if set.
        """
        # Add image information
        sample = {
            'idx': idx,
            'filename': '%s_%010d' % (self.split, idx),
            'rgb': load_convert_image(self.paths[idx]),
        }

        # Add intrinsics
        #parent_folder = self._get_parent_folder(self.paths[idx])
        base_folder_str = self._get_base_folder(self.paths[idx])
        split_type_str = self._get_split_type(self.paths[idx])
        seq_name_str = self._get_sequence_name(self.paths[idx])
        camera_str = self._get_camera_name(self.paths[idx])
        # Calibration is memoized per (base folder, split, sequence, camera).
        calib_identifier = base_folder_str + split_type_str + seq_name_str + camera_str
        #current_folder = self._get_current_folder(self.paths[idx])
        if calib_identifier in self.calibration_cache:
            c_data = self.calibration_cache[calib_identifier]
        else:
            c_data = self._read_raw_calib_files(base_folder_str,
                                                split_type_str, seq_name_str,
                                                [camera_str],
                                                self.calibrations_suffix)
            self.calibration_cache[calib_identifier] = c_data
        # Fisheye polynomial camera model for the target frame.
        poly_coeffs, principal_point, scale_factors = self._get_intrinsics(
            self.paths[idx], c_data)
        sample.update({
            'intrinsics_poly_coeffs': poly_coeffs,
        })
        sample.update({
            'intrinsics_principal_point': principal_point,
        })
        sample.update({
            'intrinsics_scale_factors': scale_factors,
        })
        sample.update({
            'path_to_theta_lut':
            self._get_path_to_theta_lut(self.paths[idx]),
        })
        sample.update({
            'path_to_ego_mask':
            self._get_path_to_ego_mask(self.paths[idx]),
        })
        # Absolute extrinsics of the target camera (used below for
        # relative context poses).
        sample.update({
            'pose_matrix':
            self._get_extrinsics_pose_matrix(self.paths[idx], c_data),
        })
        # Add pose information if requested
        if self.with_pose:
            sample.update({
                'pose': self._get_pose(self.paths[idx]),
            })

        # Add depth information if requested
        if self.with_depth:
            sample.update({
                'depth':
                self._read_depth(
                    self._get_depth_file(self.paths[idx], self.depth_suffix)),
            })

        # CoordConv-style per-pixel camera features derived from the
        # fisheye intrinsics, if requested.
        if self.cam_convs:
            sample.update({
                'cam_features':
                self._get_cam_features(principal_point[0], principal_point[1],
                                       scale_factors[0], scale_factors[1],
                                       poly_coeffs[0], poly_coeffs[1],
                                       poly_coeffs[2], poly_coeffs[3])
            })

        # Add context information if requested
        if self.with_context:
            # Add context images
            all_context_idxs = self.backward_context_paths[idx] + \
                               self.forward_context_paths[idx]
            image_context_paths, _ = \
                self._get_context_files(self.paths[idx], all_context_idxs)
            # Temporal context frames come from the same camera, so they
            # share the target frame's intrinsics / LUT / ego mask.
            same_timestep_as_origin = [False] * len(image_context_paths)
            poly_coeffs_context = [poly_coeffs] * len(image_context_paths)
            principal_point_context = [principal_point
                                       ] * len(image_context_paths)
            scale_factors_context = [scale_factors] * len(image_context_paths)
            path_to_theta_lut_context = [sample['path_to_theta_lut']
                                         ] * len(image_context_paths)
            path_to_ego_mask_context = [sample['path_to_ego_mask']
                                        ] * len(image_context_paths)
            # Geometric context: append the same-timestep left and right
            # stereo frames, each with its own calibration.
            # NOTE: `poly_coeffs` etc. are rebound here, which is why
            # `cam_features` above is computed before this block.
            if self.with_geometric_context:
                base_folder_str = self._get_base_folder(self.paths_left[idx])
                split_type_str = self._get_split_type(self.paths_left[idx])
                seq_name_str = self._get_sequence_name(self.paths_left[idx])
                camera_str = self._get_camera_name(self.paths_left[idx])
                calib_identifier = base_folder_str + split_type_str + seq_name_str + camera_str
                if calib_identifier in self.calibration_cache:
                    c_data = self.calibration_cache[calib_identifier]
                else:
                    c_data = self._read_raw_calib_files(
                        base_folder_str, split_type_str, seq_name_str,
                        [camera_str], self.calibrations_suffix)
                    self.calibration_cache[calib_identifier] = c_data
                poly_coeffs, principal_point, scale_factors = self._get_intrinsics(
                    self.paths_left[idx], c_data)
                image_context_paths.append(self.paths_left[idx])
                same_timestep_as_origin.append(True)
                poly_coeffs_context.append(poly_coeffs)
                principal_point_context.append(principal_point)
                scale_factors_context.append(scale_factors)
                path_to_theta_lut_context.append(
                    self._get_path_to_theta_lut(self.paths_left[idx]))
                path_to_ego_mask_context.append(
                    self._get_path_to_ego_mask(self.paths_left[idx]))

                # Same handling for the right-camera frame.
                base_folder_str = self._get_base_folder(self.paths_right[idx])
                split_type_str = self._get_split_type(self.paths_right[idx])
                seq_name_str = self._get_sequence_name(self.paths_right[idx])
                camera_str = self._get_camera_name(self.paths_right[idx])
                calib_identifier = base_folder_str + split_type_str + seq_name_str + camera_str
                if calib_identifier in self.calibration_cache:
                    c_data = self.calibration_cache[calib_identifier]
                else:
                    c_data = self._read_raw_calib_files(
                        base_folder_str, split_type_str, seq_name_str,
                        [camera_str], self.calibrations_suffix)
                    self.calibration_cache[calib_identifier] = c_data
                poly_coeffs, principal_point, scale_factors = self._get_intrinsics(
                    self.paths_right[idx], c_data)
                image_context_paths.append(self.paths_right[idx])
                same_timestep_as_origin.append(True)
                poly_coeffs_context.append(poly_coeffs)
                principal_point_context.append(principal_point)
                scale_factors_context.append(scale_factors)
                path_to_theta_lut_context.append(
                    self._get_path_to_theta_lut(self.paths_right[idx]))
                path_to_ego_mask_context.append(
                    self._get_path_to_ego_mask(self.paths_right[idx]))
            image_context = [
                load_convert_image(f) for f in image_context_paths
            ]
            sample.update({'rgb_context': image_context})
            sample.update(
                {'intrinsics_poly_coeffs_context': poly_coeffs_context})
            sample.update({
                'intrinsics_principal_point_context':
                principal_point_context
            })
            sample.update(
                {'intrinsics_scale_factors_context': scale_factors_context})
            sample.update(
                {'path_to_theta_lut_context': path_to_theta_lut_context})
            sample.update(
                {'path_to_ego_mask_context': path_to_ego_mask_context})
            # Add context poses
            # Extrinsic pose of every context frame, expressed relative to
            # the target frame's pose matrix.
            #if self.with_geometric_context:
            first_pose = sample['pose_matrix']
            image_context_pose = []

            for i, f in enumerate(image_context_paths):
                #if same_timestep_as_origin[i]:
                base_folder_str = self._get_base_folder(f)
                split_type_str = self._get_split_type(f)
                seq_name_str = self._get_sequence_name(f)
                camera_str = self._get_camera_name(f)
                calib_identifier = base_folder_str + split_type_str + seq_name_str + camera_str
                # current_folder = self._get_current_folder(self.paths[idx])
                if calib_identifier in self.calibration_cache:
                    c_data = self.calibration_cache[calib_identifier]
                else:
                    c_data = self._read_raw_calib_files(
                        base_folder_str, split_type_str, seq_name_str,
                        [camera_str], self.calibrations_suffix)
                    self.calibration_cache[calib_identifier] = c_data
                context_pose = self._get_extrinsics_pose_matrix(f, c_data)
                image_context_pose.append(
                    context_pose @ invert_pose_numpy(first_pose))
                #image_context_pose.append(invert_pose_numpy(invert_pose_numpy(context_pose) @ first_pose))
                #else:
                #    image_context_pose.append(None)

            sample.update({'pose_matrix_context': image_context_pose})
            sample.update(
                {'same_timestep_as_origin_context': same_timestep_as_origin})
            if self.with_pose:
                first_pose = sample['pose']
                image_context_pose = [
                    self._get_pose(f) for f in image_context_paths
                ]
                image_context_pose = [
                    invert_pose_numpy(context_pose) @ first_pose
                    for context_pose in image_context_pose
                ]
                sample.update({'pose_context': image_context_pose})

            # CoordConv features for every context frame, using the
            # per-context intrinsics collected above.
            if self.cam_convs:
                cam_features_context = []
                for i_context in range(len(image_context_paths)):
                    cam_features_context.append(
                        self._get_cam_features(
                            principal_point_context[i_context][0],
                            principal_point_context[i_context][1],
                            scale_factors_context[i_context][0],
                            scale_factors_context[i_context][1],
                            poly_coeffs_context[i_context][0],
                            poly_coeffs_context[i_context][1],
                            poly_coeffs_context[i_context][2],
                            poly_coeffs_context[i_context][3]))
                sample.update({'cam_features_context': cam_features_context})

        # Apply transformations
        if self.data_transform:
            sample = self.data_transform(sample)

        # Return sample
        return sample
# Example #5
 def _read_rgb_file(self, session, filename):
     """Load and convert the RGB image stored at root_dir/session/filename."""
     full_path = os.path.join(self.root_dir, session, filename)
     return load_convert_image(full_path)
# Example #6
 def _read_rgb_context_files(self, session, filename):
     """Load and convert every context image associated with *filename*."""
     images = []
     for context_name in self._get_context_file_paths(filename):
         images.append(
             load_convert_image(os.path.join(self.root_dir, session, context_name)))
     return images
# Example #7
    def __getitem__(self, idx):
        """Get dataset sample given an index.

        The sample dict holds the RGB frame and its intrinsics, plus pose,
        depth and temporal-context entries when the corresponding dataset
        flags are enabled. ``self.data_transform`` is applied last if set.
        """
        path = self.paths[idx]

        # Target frame information.
        sample = {
            'idx': idx,
            'filename': '%s_%010d' % (self.split, idx),
            'rgb': load_convert_image(path),
        }

        # Calibration data, cached per folder to avoid re-reading files.
        current_folder = self._get_current_folder(path)
        if current_folder not in self.calibration_cache:
            self.calibration_cache[current_folder] = \
                self._read_raw_calib_files(current_folder, self.cameras)
        c_data = self.calibration_cache[current_folder]
        sample['intrinsics'] = self._get_intrinsics(path, c_data)

        # Pose of the target frame, if requested.
        if self.with_pose:
            sample['pose'] = self._get_pose(path)

        # Depth map, if requested.
        if self.with_depth:
            sample['depth'] = self._read_depth(self._get_depth_file(path))

        # Temporal context (backward + forward neighbours), if requested.
        if self.with_context:
            context_idxs = self.backward_context_paths[idx] + \
                           self.forward_context_paths[idx]
            context_paths, _ = self._get_context_files(path, context_idxs)
            sample['rgb_context'] = [
                load_convert_image(f) for f in context_paths
            ]
            if self.with_pose:
                # Context poses expressed relative to the target frame.
                target_pose = sample['pose']
                sample['pose_context'] = [
                    invert_pose_numpy(self._get_pose(f)) @ target_pose
                    for f in context_paths
                ]

        # Apply transformations.
        if self.data_transform:
            sample = self.data_transform(sample)

        return sample
# Example #8
    def __getitem__(self, idx):
        """Get dataset sample given an index.

        The sample dict holds the RGB frame, fisheye intrinsics
        (polynomial coefficients, principal point, y-axis scale factor),
        the theta-LUT path, plus pose, depth and backward temporal-context
        entries when the corresponding dataset flags are enabled.
        ``self.data_transform`` is applied last if set.
        """
        path = self.paths[idx]

        # Target frame information.
        sample = {
            'idx': idx,
            'filename': '%s_%05d' % (self.split, idx),
            'rgb': load_convert_image(path),
        }

        # Calibration is read per sample here (no cache in this variant).
        c_data = self._read_raw_calib_files(path, self.cameras)

        # Fisheye camera model for the target frame.
        poly_coeffs, principal_point, scale_factor_y = \
            self._get_intrinsics(path, c_data)
        sample['intrinsics_poly_coeffs'] = poly_coeffs
        sample['intrinsics_principal_point'] = principal_point
        sample['intrinsics_scale_factor_y'] = scale_factor_y
        sample['path_to_theta_lut'] = self._get_path_to_theta_lut(path, c_data)

        # Pose of the target frame, if requested.
        if self.with_pose:
            sample['pose'] = self._get_pose(path)

        # Depth map, if requested.
        if self.with_depth:
            sample['depth'] = self._read_depth(self._get_depth_file(path))

        # Temporal context, if requested. Only the backward context is
        # used here; the forward context is deliberately disabled.
        if self.with_context:
            context_idxs = self.backward_context_paths[idx]
            context_paths, _ = self._get_context_files(path, context_idxs)
            sample['rgb_context'] = [
                load_convert_image(f) for f in context_paths
            ]
            if self.with_pose:
                # Context poses expressed relative to the target frame.
                target_pose = sample['pose']
                sample['pose_context'] = [
                    invert_pose_numpy(self._get_pose(f)) @ target_pose
                    for f in context_paths
                ]

        # Apply transformations.
        if self.data_transform:
            sample = self.data_transform(sample)

        return sample