def __getitem__(self, idx):
        """Get dataset sample given an index.

        Builds a dict with the RGB image, fisheye intrinsics (polynomial
        coefficients, principal point, scale factors), theta-LUT / ego-mask
        paths and the extrinsic pose matrix, plus optional pose, depth,
        cam-conv features and temporal / geometric context entries.
        """

        def _calib_data(path):
            # Calibration data for *path*, memoized in
            # self.calibration_cache keyed by base folder + split type +
            # sequence name + camera name (previously duplicated inline
            # four times in this method).
            base = self._get_base_folder(path)
            split_type = self._get_split_type(path)
            seq_name = self._get_sequence_name(path)
            camera = self._get_camera_name(path)
            key = base + split_type + seq_name + camera
            if key not in self.calibration_cache:
                self.calibration_cache[key] = self._read_raw_calib_files(
                    base, split_type, seq_name, [camera],
                    self.calibrations_suffix)
            return self.calibration_cache[key]

        # Add image information
        sample = {
            'idx': idx,
            'filename': '%s_%010d' % (self.split, idx),
            'rgb': load_convert_image(self.paths[idx]),
        }

        # Intrinsics / extrinsics of the target frame
        c_data = _calib_data(self.paths[idx])
        poly_coeffs, principal_point, scale_factors = self._get_intrinsics(
            self.paths[idx], c_data)
        sample['intrinsics_poly_coeffs'] = poly_coeffs
        sample['intrinsics_principal_point'] = principal_point
        sample['intrinsics_scale_factors'] = scale_factors
        sample['path_to_theta_lut'] = self._get_path_to_theta_lut(
            self.paths[idx])
        sample['path_to_ego_mask'] = self._get_path_to_ego_mask(
            self.paths[idx])
        sample['pose_matrix'] = self._get_extrinsics_pose_matrix(
            self.paths[idx], c_data)

        # Add pose information if requested
        if self.with_pose:
            sample['pose'] = self._get_pose(self.paths[idx])

        # Add depth information if requested
        if self.with_depth:
            sample['depth'] = self._read_depth(
                self._get_depth_file(self.paths[idx], self.depth_suffix))

        if self.cam_convs:
            sample['cam_features'] = self._get_cam_features(
                principal_point[0], principal_point[1],
                scale_factors[0], scale_factors[1],
                poly_coeffs[0], poly_coeffs[1],
                poly_coeffs[2], poly_coeffs[3])

        # Add context information if requested
        if self.with_context:
            # Temporal context frames share the target camera's intrinsics
            # and mask/LUT paths.
            all_context_idxs = self.backward_context_paths[idx] + \
                               self.forward_context_paths[idx]
            image_context_paths, _ = self._get_context_files(
                self.paths[idx], all_context_idxs)
            n_temporal = len(image_context_paths)
            same_timestep_as_origin = [False] * n_temporal
            poly_coeffs_context = [poly_coeffs] * n_temporal
            principal_point_context = [principal_point] * n_temporal
            scale_factors_context = [scale_factors] * n_temporal
            path_to_theta_lut_context = \
                [sample['path_to_theta_lut']] * n_temporal
            path_to_ego_mask_context = \
                [sample['path_to_ego_mask']] * n_temporal

            if self.with_geometric_context:
                # Append the left and right camera views at the same
                # timestep, each with its own calibration.
                for side_path in (self.paths_left[idx],
                                  self.paths_right[idx]):
                    side_calib = _calib_data(side_path)
                    side_poly, side_pp, side_sf = self._get_intrinsics(
                        side_path, side_calib)
                    image_context_paths.append(side_path)
                    same_timestep_as_origin.append(True)
                    poly_coeffs_context.append(side_poly)
                    principal_point_context.append(side_pp)
                    scale_factors_context.append(side_sf)
                    path_to_theta_lut_context.append(
                        self._get_path_to_theta_lut(side_path))
                    path_to_ego_mask_context.append(
                        self._get_path_to_ego_mask(side_path))

            sample['rgb_context'] = [
                load_convert_image(f) for f in image_context_paths
            ]
            sample['intrinsics_poly_coeffs_context'] = poly_coeffs_context
            sample['intrinsics_principal_point_context'] = \
                principal_point_context
            sample['intrinsics_scale_factors_context'] = scale_factors_context
            sample['path_to_theta_lut_context'] = path_to_theta_lut_context
            sample['path_to_ego_mask_context'] = path_to_ego_mask_context

            # Add context poses: each context frame's extrinsics composed
            # with the inverse of the target frame's pose matrix.
            first_pose = sample['pose_matrix']
            image_context_pose = []
            for f in image_context_paths:
                context_pose = self._get_extrinsics_pose_matrix(
                    f, _calib_data(f))
                image_context_pose.append(
                    context_pose @ invert_pose_numpy(first_pose))
            sample['pose_matrix_context'] = image_context_pose
            sample['same_timestep_as_origin_context'] = \
                same_timestep_as_origin

            if self.with_pose:
                first_pose = sample['pose']
                sample['pose_context'] = [
                    invert_pose_numpy(self._get_pose(f)) @ first_pose
                    for f in image_context_paths
                ]

            if self.cam_convs:
                sample['cam_features_context'] = [
                    self._get_cam_features(pp[0], pp[1], sf[0], sf[1],
                                           pc[0], pc[1], pc[2], pc[3])
                    for pp, sf, pc in zip(principal_point_context,
                                          scale_factors_context,
                                          poly_coeffs_context)
                ]

        # Apply transformations
        if self.data_transform:
            sample = self.data_transform(sample)

        # Return sample
        return sample
    def __getitem__(self, idx):
        """Get dataset sample given an index.

        NOTE(review): this redefines the ``__getitem__`` declared earlier in
        the file; in a single class only the last definition takes effect —
        confirm these variants belong to different classes.

        Returns a dict with the RGB image, intrinsics (K, k, p), the
        ego-mask path, and optional pose / depth / temporal-context entries.
        """
        # Add image information
        sample = {
            'idx': idx,
            'filename': '%s_%010d' % (self.split, idx),
            'rgb': load_convert_image(self.paths[idx]),
        }

        # Calibration data, memoized per
        # (base folder, split type, sequence name, camera name).
        base_folder_str = self._get_base_folder(self.paths[idx])
        split_type_str = self._get_split_type(self.paths[idx])
        seq_name_str = self._get_sequence_name(self.paths[idx])
        camera_str = self._get_camera_name(self.paths[idx])
        calib_identifier = base_folder_str + split_type_str + seq_name_str + camera_str
        if calib_identifier in self.calibration_cache:
            c_data = self.calibration_cache[calib_identifier]
        else:
            c_data = self._read_raw_calib_files(base_folder_str,
                                                split_type_str, seq_name_str,
                                                [camera_str])
            self.calibration_cache[calib_identifier] = c_data

        # Add intrinsics (K matrix plus distortion coefficients k, p).
        K, k, p = self._get_intrinsics(self.paths[idx], c_data)
        sample['intrinsics_K'] = K
        sample['intrinsics_k'] = k
        sample['intrinsics_p'] = p
        sample['path_to_ego_mask'] = self._get_path_to_ego_mask(
            self.paths[idx])

        # Add pose information if requested
        if self.with_pose:
            sample['pose'] = self._get_pose(self.paths[idx])

        # Add depth information if requested
        if self.with_depth:
            sample['depth'] = self._read_depth(
                self._get_depth_file(self.paths[idx]))

        # Add context information if requested
        if self.with_context:
            # Add context images
            all_context_idxs = (self.backward_context_paths[idx] +
                                self.forward_context_paths[idx])
            image_context_paths, _ = self._get_context_files(
                self.paths[idx], all_context_idxs)
            # Every context frame shares the target camera's ego mask.
            sample['path_to_ego_mask_context'] = \
                [sample['path_to_ego_mask']] * len(image_context_paths)
            sample['rgb_context'] = [
                load_convert_image(f) for f in image_context_paths
            ]
            if self.with_pose:
                # Relative pose of each context frame w.r.t. the target.
                first_pose = sample['pose']
                sample['pose_context'] = [
                    invert_pose_numpy(self._get_pose(f)) @ first_pose
                    for f in image_context_paths
                ]

        # Apply transformations
        if self.data_transform:
            sample = self.data_transform(sample)

        # Return sample
        return sample
# Code example #3
    def __getitem__(self, idx):
        """Get dataset sample given an index.

        Returns a dict with the RGB image, fisheye intrinsics (polynomial
        coefficients, principal point, y scale factor), the theta-LUT path,
        and optional pose / depth / backward-context entries.
        """
        path = self.paths[idx]

        # Base sample: index, split-scoped filename, and the loaded image.
        sample = {
            'idx': idx,
            'filename': '%s_%05d' % (self.split, idx),
            'rgb': load_convert_image(path),
        }

        # Intrinsics from the raw calibration files for this frame.
        c_data = self._read_raw_calib_files(path, self.cameras)
        poly_coeffs, principal_point, scale_factor_y = \
            self._get_intrinsics(path, c_data)
        sample['intrinsics_poly_coeffs'] = poly_coeffs
        sample['intrinsics_principal_point'] = principal_point
        sample['intrinsics_scale_factor_y'] = scale_factor_y
        sample['path_to_theta_lut'] = \
            self._get_path_to_theta_lut(path, c_data)

        # Pose of the target frame, if requested.
        if self.with_pose:
            sample['pose'] = self._get_pose(path)

        # Depth map for the target frame, if requested.
        if self.with_depth:
            sample['depth'] = self._read_depth(self._get_depth_file(path))

        # Context frames, if requested.
        if self.with_context:
            # NOTE(review): only backward context is used here — forward
            # context is deliberately excluded; confirm this is intended.
            context_idxs = self.backward_context_paths[idx]
            context_paths, _ = self._get_context_files(path, context_idxs)
            sample['rgb_context'] = [
                load_convert_image(f) for f in context_paths
            ]
            # Relative pose of each context frame w.r.t. the target.
            if self.with_pose:
                ref_pose = sample['pose']
                sample['pose_context'] = [
                    invert_pose_numpy(self._get_pose(f)) @ ref_pose
                    for f in context_paths
                ]

        # Apply transformations
        if self.data_transform:
            sample = self.data_transform(sample)

        return sample