Example #1
0
 def __getitem__(self, index):
     """
     Fetch a random patch example from the crowd dataset.

     NOTE: the passed ``index`` is ignored; a fresh random position over the
     entire dataset is drawn on every call (``random.randrange`` below).

     :param index: The index within the entire dataset (unused).
     :type index: int
     :return: An example and label from the crowd dataset.
     :rtype: torch.Tensor, torch.Tensor
     """
     # Draw a random flat position and locate the file it falls in.
     sampled_index = random.randrange(self.length)
     file_index = np.searchsorted(self.start_indexes, sampled_index, side='right') - 1
     file_name = self.file_names[file_index]
     position_index = sampled_index - self.start_indexes[file_index]
     # Memory-map the arrays so only the patch actually extracted is read.
     image = np.load(os.path.join(self.dataset_directory, 'images', file_name), mmap_mode='r')
     label = np.load(os.path.join(self.dataset_directory, 'labels', file_name), mmap_mode='r')
     map_ = np.load(os.path.join(self.dataset_directory, self.map_directory_name, file_name), mmap_mode='r')
     # Translate the flat position index into (y, x) patch-center coordinates.
     half_patch_size = int(self.image_patch_size // 2)
     y_positions = range(half_patch_size, image.shape[0] - half_patch_size + 1)
     x_positions = range(half_patch_size, image.shape[1] - half_patch_size + 1)
     y_index, x_index = np.unravel_index(position_index, [len(y_positions), len(x_positions)])
     example = CrowdExample(image=image, label=label, map_=map_)
     extract_patch_transform = ExtractPatchForPosition(self.image_patch_size, self.label_patch_size,
                                                       allow_padded=True)  # In case image is smaller than patch.
     example = extract_patch_transform(example, y_positions[y_index], x_positions[x_index])
     if self.middle_transform:
         example = self.middle_transform(example)
     preprocess_transform = torchvision.transforms.Compose([NegativeOneToOneNormalizeImage(),
                                                            NumpyArraysToTorchTensors()])
     example = preprocess_transform(example)
     return example.image, example.label, example.map
Example #2
0
 def __getitem__(self, index):
     """
     :param index: The index within the entire dataset.
     :type index: int
     :return: An example and label from the crowd dataset.
     :rtype: torch.Tensor, torch.Tensor
     """
     # NOTE: ``index`` is ignored; a random dataset position is sampled instead.
     index_ = random.randrange(self.length)
     # Locate which camera's data the sampled flat position falls into.
     camera_data_index = np.searchsorted(self.start_indexes, index_, side='right') - 1
     start_index = self.start_indexes[camera_data_index]
     camera_data = self.camera_data_list[camera_data_index]
     camera_images = camera_data.images
     array_index = index_ - start_index
     half_patch_size = int(self.image_patch_size // 2)
     # Valid patch-center positions within one image. camera_images stacks the
     # images along axis 0, so axes 1 and 2 are the per-image y and x extents.
     y_positions = range(half_patch_size, camera_images.shape[1] - half_patch_size + 1)
     x_positions = range(half_patch_size, camera_images.shape[2] - half_patch_size + 1)
     image_indexes_length = len(y_positions) * len(x_positions)
     # Integer divmod is exact, unlike math.floor over float true-division,
     # which can lose precision for very large indexes.
     image_index, position_index = divmod(array_index, image_indexes_length)
     image = camera_data.images[image_index]
     label = camera_data.labels[image_index]
     map_ = label  # This dataset reuses the label array as the map.
     extract_patch_transform = ExtractPatchForPosition(self.image_patch_size, self.label_patch_size,
                                                       allow_padded=True)  # In case image is smaller than patch.
     preprocess_transform = torchvision.transforms.Compose([NegativeOneToOneNormalizeImage(),
                                                            NumpyArraysToTorchTensors()])
     # The position ranges computed above already apply to this image
     # (image.shape[0:2] == camera_images.shape[1:3]); no recomputation needed.
     positions_shape = [len(y_positions), len(x_positions)]
     y_index, x_index = np.unravel_index(position_index, positions_shape)
     y = y_positions[y_index]
     x = x_positions[x_index]
     example = CrowdExample(image=image, label=label, map_=map_)
     example = extract_patch_transform(example, y, x)
     if self.middle_transform:
         example = self.middle_transform(example)
     example = preprocess_transform(example)
     return example.image, example.label, example.map