Example #1
0
    def __getitem__(self, index):
        """
        Fetch the sample at *index* (0 .. len(self) - 1).

        Reads the colour and warped light-field arrays for the sample
        from the HDF5 file, applies the configured transform pipeline,
        and returns the result as a dictionary of tensors.
        """
        # SWMR read mode lets multiple DataLoader workers read safely
        # while a writer may still be appending to the file.
        with h5py.File(self.file_path, mode='r', libver='latest',
                       swmr=True) as h5_file:
            # Several crops map onto a single underlying sample.
            sample_idx = index // self.num_crops
            sample = {
                'colour': torch.tensor(h5_file[self.colour][sample_idx],
                                       dtype=torch.float32),
                'warped': torch.tensor(h5_file[self.warped][sample_idx],
                                       dtype=torch.float32),
                'grid_size': self.grid_size,
            }

            # Random cropping is only applied while training on patches.
            if self.crop_train:
                sample = data_transform.get_random_crop(
                    sample, self.patch_size)
            sample = data_transform.normalise_sample(sample)
            sample = data_transform.random_gamma(sample)

            # Optionally keep only 3 colour channels, then apply any
            # user-supplied transform last.
            if self.sub_chan:
                sample = data_transform.subsample_channels(sample, 3)
            if self.transform:
                sample = self.transform(sample)

            return sample
Example #2
0
    def __getitem__(self, index):
        """
        Fetch the sample at *index* (0 .. len(self) - 1).

        Reads the depth and colour arrays for the underlying light-field
        sample from the HDF5 file, applies random crop and gamma plus
        any configured transform, and returns the channel-stacked result.
        """
        # SWMR read mode lets multiple DataLoader workers read safely
        # while a writer may still be appending to the file.
        with h5py.File(self.file_path, mode='r', libver='latest',
                       swmr=True) as h5_file:
            # Several crops map onto a single underlying sample.
            sample_idx = index // self.num_crops
            # Depth is squeezed to drop any singleton channel dimension
            # stored in the HDF5 dataset — TODO confirm stored layout.
            depth_map = torch.squeeze(
                torch.tensor(h5_file[self.depth][sample_idx],
                             dtype=torch.float32))
            sample = {
                'depth': depth_map,
                'colour': torch.tensor(h5_file[self.colour][sample_idx],
                                       dtype=torch.float32),
                'grid_size': self.grid_size,
            }

            sample = data_transform.get_random_crop(sample, self.patch_size)
            sample = data_transform.random_gamma(sample)

            # User-supplied transform runs before the final stacking step.
            if self.transform:
                sample = self.transform(sample)

            return data_transform.stack(sample, channels=65)