Example #1
def __getitem__(self, index):
    """
    :param index: The index within the entire dataset.
    :type index: int
    :return: An example, label, and map from the crowd dataset.
    :rtype: torch.Tensor, torch.Tensor, torch.Tensor
    """
    # A random index is sampled instead of the given one, so each access returns a random patch.
    index_ = random.randrange(self.length)
    file_name_index = np.searchsorted(self.start_indexes, index_, side='right') - 1
    start_index = self.start_indexes[file_name_index]
    file_name = self.file_names[file_name_index]
    position_index = index_ - start_index
    extract_patch_transform = ExtractPatchForPosition(self.image_patch_size, self.label_patch_size,
                                                      allow_padded=True)  # In case image is smaller than patch.
    preprocess_transform = torchvision.transforms.Compose([NegativeOneToOneNormalizeImage(),
                                                           NumpyArraysToTorchTensors()])
    image = np.load(os.path.join(self.dataset_directory, 'images', file_name), mmap_mode='r')
    label = np.load(os.path.join(self.dataset_directory, 'labels', file_name), mmap_mode='r')
    map_ = np.load(os.path.join(self.dataset_directory, self.map_directory_name, file_name), mmap_mode='r')
    half_patch_size = int(self.image_patch_size // 2)
    # Valid patch centers are those far enough from the borders to fit a full patch.
    y_positions = range(half_patch_size, image.shape[0] - half_patch_size + 1)
    x_positions = range(half_patch_size, image.shape[1] - half_patch_size + 1)
    positions_shape = [len(y_positions), len(x_positions)]
    y_index, x_index = np.unravel_index(position_index, positions_shape)
    y = y_positions[y_index]
    x = x_positions[x_index]
    example = CrowdExample(image=image, label=label, map_=map_)
    example = extract_patch_transform(example, y, x)
    if self.middle_transform:
        example = self.middle_transform(example)
    example = preprocess_transform(example)
    return example.image, example.label, example.map
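
To make the flat-index decomposition above concrete, here is a minimal standalone sketch with made-up start offsets and grid sizes (and ignoring the half-patch border offset): np.searchsorted picks which file a flat dataset index falls into, and np.unravel_index turns the remaining offset into a (y, x) position.

import numpy as np

# Hypothetical layout: three files whose valid patch positions begin at these flat offsets.
start_indexes = np.array([0, 100, 250])
file_names = ['a.npy', 'b.npy', 'c.npy']
positions_shape = [10, 15]  # 10 valid y positions by 15 valid x positions in file 'b.npy'.

index_ = 137  # A flat index into the whole dataset.
file_name_index = np.searchsorted(start_indexes, index_, side='right') - 1
position_index = index_ - start_indexes[file_name_index]
y_index, x_index = np.unravel_index(position_index, positions_shape)
print(file_names[file_name_index], y_index, x_index)  # b.npy 2 7
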
Example #2
def batches_of_patches_with_position(self,
                                     full_example,
                                     window_step_size=32):
    """
    A generator for extracting patches from an image in batches.

    :param full_example: The full example to be patched.
    :type full_example: CrowdExample
    :param window_step_size: The step size of the sliding window.
    :type window_step_size: int
    :return: A generator yielding batches of patches.
    :rtype: generator of list[CrowdExample]
    """
    extract_patch_transform = ExtractPatchForPosition()
    test_transform = torchvision.transforms.Compose([
        data.NegativeOneToOneNormalizeImage(),
        data.NumpyArraysToTorchTensors()
    ])
    batch = []
    for y in range(0, full_example.label.shape[0], window_step_size):
        for x in range(0, full_example.label.shape[1], window_step_size):
            patch = extract_patch_transform(full_example, y, x)
            example = test_transform(patch)
            example_with_position = CrowdExample(image=example.image,
                                                 label=example.label,
                                                 patch_center_x=x,
                                                 patch_center_y=y)
            batch.append(example_with_position)
            if len(batch) == self.settings.batch_size:
                yield batch
                batch = []
    if batch:  # Yield the final partial batch, but never an empty one.
        yield batch
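
Stripped of the crowd-specific transforms, the batching pattern this generator uses looks like the following self-contained sketch: accumulate items, yield each full batch, and yield the remainder at the end (guarded so an empty trailing batch is never produced).

def batches(items, batch_size=3):
    """Yield full batches of items, then whatever remains."""
    batch = []
    for item in items:
        batch.append(item)
        if len(batch) == batch_size:
            yield batch
            batch = []
    if batch:  # Skip the trailing yield when the count divides evenly.
        yield batch

print(list(batches(range(7))))  # [[0, 1, 2], [3, 4, 5], [6]]
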
Example #3
def batches_of_patches_with_position(self, full_example, window_step_size=32):
    """A generator yielding batches of positioned patches extracted with a sliding window."""
    extract_patch_transform = ExtractPatchForPosition()
    test_transform = torchvision.transforms.Compose([data.NegativeOneToOneNormalizeImage(),
                                                     data.NumpyArraysToTorchTensors()])
    batch = []
    for y in range(0, full_example.label.shape[0], window_step_size):
        for x in range(0, full_example.label.shape[1], window_step_size):
            patch = extract_patch_transform(full_example, y, x)
            example = test_transform(patch)
            example_with_position = CrowdExampleWithPosition(example.image, example.label, x, y)
            batch.append(example_with_position)
            if len(batch) == self.settings.batch_size:
                yield batch
                batch = []
    if batch:  # Yield the final partial batch, but never an empty one.
        yield batch
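
The stored patch positions are presumably consumed downstream to place per-patch predictions back onto the full image; a purely illustrative sketch of that idea (the project's actual reassembly code is not shown here) follows.

import numpy as np

label_shape = (96, 96)
window_step_size = 32
predicted_map = np.zeros(label_shape)
# Hypothetical (x, y, prediction) triples, as a network might produce per positioned patch.
patch_predictions = [(x, y, 1.0)
                     for y in range(0, label_shape[0], window_step_size)
                     for x in range(0, label_shape[1], window_step_size)]
for x, y, prediction in patch_predictions:
    predicted_map[y, x] = prediction  # Write each prediction back at its patch position.
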
Example #4
def __getitem__(self, index):
    """
    :param index: The index within the entire dataset.
    :type index: int
    :return: An example, label, and map from the crowd dataset.
    :rtype: torch.Tensor, torch.Tensor, torch.Tensor
    """
    # A random index is sampled instead of the given one, so each access returns a random patch.
    index_ = random.randrange(self.length)
    camera_data_index = np.searchsorted(self.start_indexes, index_, side='right') - 1
    start_index = self.start_indexes[camera_data_index]
    camera_data = self.camera_data_list[camera_data_index]
    camera_images = camera_data.images
    array_index = index_ - start_index
    half_patch_size = int(self.image_patch_size // 2)
    y_positions = range(half_patch_size, camera_images.shape[1] - half_patch_size + 1)
    x_positions = range(half_patch_size, camera_images.shape[2] - half_patch_size + 1)
    image_indexes_length = len(y_positions) * len(x_positions)
    # Split the offset within this camera's block into an image index and a within-image position.
    image_index = math.floor(array_index / image_indexes_length)
    position_index = array_index % image_indexes_length
    image = camera_data.images[image_index]
    label = camera_data.labels[image_index]
    map_ = label  # The label itself serves as the map for this dataset.
    extract_patch_transform = ExtractPatchForPosition(self.image_patch_size, self.label_patch_size,
                                                      allow_padded=True)  # In case image is smaller than patch.
    preprocess_transform = torchvision.transforms.Compose([NegativeOneToOneNormalizeImage(),
                                                           NumpyArraysToTorchTensors()])
    # Recompute the valid patch centers for the selected image (same values as above).
    y_positions = range(half_patch_size, image.shape[0] - half_patch_size + 1)
    x_positions = range(half_patch_size, image.shape[1] - half_patch_size + 1)
    positions_shape = [len(y_positions), len(x_positions)]
    y_index, x_index = np.unravel_index(position_index, positions_shape)
    y = y_positions[y_index]
    x = x_positions[x_index]
    example = CrowdExample(image=image, label=label, map_=map_)
    example = extract_patch_transform(example, y, x)
    if self.middle_transform:
        example = self.middle_transform(example)
    example = preprocess_transform(example)
    return example.image, example.label, example.map
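
A toy-number sketch of the extra step this variant adds over Example #1: the within-camera offset is first split into an image index and a within-image position before the same (y, x) lookup. Note that math.floor(a / b) together with a % b is equivalent to divmod(a, b) for non-negative integers.

import math

import numpy as np

image_indexes_length = 150  # Hypothetical: 10 valid y positions * 15 valid x positions per image.
positions_shape = [10, 15]

array_index = 487  # Offset within one camera's block of the dataset.
image_index = math.floor(array_index / image_indexes_length)  # 3
position_index = array_index % image_indexes_length  # 37
y_index, x_index = np.unravel_index(position_index, positions_shape)
print(image_index, y_index, x_index)  # 3 2 7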