Example #1
    def __call__(self, input_image):
        """
        Transforms an image by first shifting and scaling, and then optionally clamps the values.
        :param input_image:
        :param shift:
        :param scale:
        :param clamp_min:
        :param clamp_max:
        :return:
        """
        output_image = input_image
        if self.shift is not None or self.scale is not None:
            output_image = rescale(output_image,
                                   shift=self.shift,
                                   scale=self.scale)
        if self.random_shift is not None or self.random_scale is not None:
            # Guard against only one of random_shift/random_scale being set.
            current_random_shift = 0
            current_random_scale = 1
            if self.random_shift is not None:
                current_random_shift = random.float_uniform(
                    -self.random_shift, self.random_shift)
            if self.random_scale is not None:
                current_random_scale = 1 + random.float_uniform(
                    -self.random_scale, self.random_scale)
            output_image = rescale(output_image,
                                   shift=current_random_shift,
                                   scale=current_random_scale)
        if self.clamp_min is not None or self.clamp_max is not None:
            output_image = clamp(output_image,
                                 clamp_min=self.clamp_min,
                                 clamp_max=self.clamp_max)

        return output_image
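Every snippet on this page relies on a float_uniform helper (sometimes accessed as random.float_uniform) that is not shown. A minimal sketch of what such a helper could look like, assuming it is just a thin wrapper around NumPy's uniform sampler (the name and signature follow the call sites on this page; the body is an assumption, not the library's actual implementation):

import numpy as np

def float_uniform(low, high, size=None):
    # Assumed helper: draw uniform random float(s) in [low, high); returns a
    # scalar when size is None, otherwise an np.ndarray of the given shape.
    return np.random.uniform(low, high, size)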
Example #2
    def get(self, **kwargs):
        """
        Apply random scaling in each dimension. The scale factor is drawn from a uniform
        distribution 1 + [-random_scale, random_scale].

        :param input_image: ITK image
        :param random_scale: float, list of floats
            ranges for uniform random scaling in each dimension
        :param kwargs:
            chain: if True returns the transform instead of the output image (default=False)
        :return:
        """
        # scale by individual factor in each dimension
        if isinstance(self.random_scale, (list, tuple)):
            current_scale = [
                1.0 +
                float_uniform(-self.random_scale[i], self.random_scale[i])
                for i in range(len(self.random_scale))
            ]
        else:
            current_scale = 1.0 + float_uniform(-self.random_scale,
                                                self.random_scale)
        return self.get_scale_transform(current_scale)
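get_scale_transform itself is not part of the snippet; a hedged sketch of what such a helper might return for the sitk case (the docstrings below mention sitk.AffineTransform, but the signature and body here are assumptions, and note that some examples call it with an explicit dim argument while others do not):

import SimpleITK as sitk

def get_scale_transform(dim, scale):
    # Assumed helper: build an affine transform that scales each axis by the
    # corresponding factor (a scalar scale is applied to all axes).
    t = sitk.AffineTransform(dim)
    scale_factors = scale if isinstance(scale, (list, tuple)) else [scale] * dim
    t.Scale([float(s) for s in scale_factors])
    return t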
Example #3
    def get_deformation_transform(dim,
                                  grid_nodes,
                                  origin,
                                  physical_dimensions,
                                  spline_order,
                                  deformation_value):
        """
        Returns the sitk transform based on the given parameters.
        :param dim: The dimension.
        :param grid_nodes: The number of grid nodes in each dimension.
        :param origin: The domain origin.
        :param physical_dimensions: The domain physical size.
        :param spline_order: The spline order.
        :param deformation_value: The maximum deformation value.
        :return: The sitk.BSplineTransform() with the specified parameters.
        """
        mesh_size = [grid_node - spline_order for grid_node in grid_nodes]

        t = sitk.BSplineTransform(dim, spline_order)
        t.SetTransformDomainOrigin(origin)
        t.SetTransformDomainMeshSize(mesh_size)
        t.SetTransformDomainPhysicalDimensions(physical_dimensions)
        t.SetTransformDomainDirection(np.eye(dim).flatten())

        if isinstance(deformation_value, (list, tuple)):
            deform_params = []
            for v in deformation_value:
                for i in range(int(np.prod(grid_nodes))):
                    deform_params.append(float_uniform(-v, v))
        else:
            deform_params = [float_uniform(-deformation_value, deformation_value)
                             for _ in t.GetParameters()]
        t.SetParameters(deform_params)

        return t
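A hedged usage sketch for the helper above, assuming it is exposed as a plain function (in the snippet it reads like a static method) and that SimpleITK and NumPy are available; the image size and parameter values are made up purely for illustration:

import numpy as np
import SimpleITK as sitk

# Illustrative 2D example: deform a 128x128 image with a coarse random B-spline grid.
image = sitk.Image(128, 128, sitk.sitkFloat32)
physical_dimensions = [size * spacing
                       for size, spacing in zip(image.GetSize(), image.GetSpacing())]
transform = get_deformation_transform(dim=2,
                                      grid_nodes=[6, 6],
                                      origin=image.GetOrigin(),
                                      physical_dimensions=physical_dimensions,
                                      spline_order=3,
                                      deformation_value=5.0)
# Resample the input through the random deformation (linear interpolation, zero background).
deformed = sitk.Resample(image, transform, sitk.sitkLinear, 0.0)

Example #4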
def shift_scale_clamp(input_image,
                      shift=None,
                      scale=None,
                      clamp_min=None,
                      clamp_max=None,
                      random_shift=None,
                      random_scale=None):
    """
    Transforms an image by first shifting and scaling it, and then optionally clamping the values.
    Order of operations:
        image += shift
        image *= scale
        image += random.float_uniform(-random_shift, random_shift)
        image *= 1 + random.float_uniform(-random_scale, random_scale)
        image = np.clip(image, clamp_min, clamp_max)
    :param input_image: The sitk image.
    :param shift: The intensity shift (added) value (image += shift).
    :param scale: The intensity scale (multiplied) value (image *= scale).
    :param clamp_min: The minimum value to clamp (image = np.clip(image, clamp_min, clamp_max)).
    :param clamp_max: The maximum value to clamp (image = np.clip(image, clamp_min, clamp_max)).
    :param random_shift: The random shift (image += random.float_uniform(-random_shift, random_shift)).
    :param random_scale: The additional random scale (image *= 1 + random.float_uniform(-random_scale, random_scale)).
    :return: The transformed sitk image.
    """
    output_image = input_image
    if shift is not None or scale is not None:
        output_image = shift_scale(output_image, shift=shift, scale=scale)
    if random_shift is not None or random_scale is not None:
        # Guard against only one of random_shift/random_scale being set.
        current_random_shift = random.float_uniform(-random_shift, random_shift) if random_shift is not None else 0
        current_random_scale = 1 + random.float_uniform(-random_scale, random_scale) if random_scale is not None else 1
        output_image = shift_scale(output_image, shift=current_random_shift, scale=current_random_scale)
    if clamp_min is not None or clamp_max is not None:
        output_image = clamp(output_image, clamp_min=clamp_min, clamp_max=clamp_max)
    return output_image
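A brief hypothetical call of shift_scale_clamp, using a dummy image and values of the kind that appear in the CT example further down; the concrete numbers are illustrative only, and shift_scale, clamp and random.float_uniform are assumed to come from the same library as the function itself:

import SimpleITK as sitk

image = sitk.Image(64, 64, sitk.sitkFloat32)
# Map raw intensities to roughly [-1, 1], add a small random shift/scale, then clamp.
augmented = shift_scale_clamp(image,
                              shift=0,
                              scale=1 / 2048,
                              random_shift=0.1,
                              random_scale=0.1,
                              clamp_min=-1.0,
                              clamp_max=1.0)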
Example #5
    def __call__(self, input_image):
        """
        Transforms an image by first shifting and scaling it, and then optionally clamping the values.
        Order of operations:
            image += shift
            image *= scale
            image += random.float_uniform(-random_shift, random_shift)
            image *= 1 + random.float_uniform(-random_scale, random_scale)
            image = np.clip(image, clamp_min, clamp_max)
        :param input_image: np input image
        :return: np processed image
        """
        output_image = input_image
        if self.shift is not None:
            output_image += self.shift
        if self.scale is not None:
            output_image *= self.scale
        if self.random_shift is not None:
            current_random_shift = random.float_uniform(
                -self.random_shift, self.random_shift)
            output_image += current_random_shift
        if self.random_scale is not None:
            current_random_scale = 1 + random.float_uniform(
                -self.random_scale, self.random_scale)
            output_image *= current_random_scale
        if self.clamp_min is not None or self.clamp_max is not None:
            output_image = np.clip(output_image, self.clamp_min,
                                   self.clamp_max)

        return output_image
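The __call__ above reads shift, scale, clamp_min, clamp_max, random_shift and random_scale from instance attributes; a minimal sketch of a constructor that would set them up (an assumption consistent with the ShiftScaleClamp(...) calls further down, not necessarily the library's exact code):

class ShiftScaleClamp(object):
    def __init__(self, shift=None, scale=None, clamp_min=None, clamp_max=None,
                 random_shift=None, random_scale=None):
        # Store the augmentation parameters; __call__ applies them in the documented order.
        self.shift = shift
        self.scale = scale
        self.clamp_min = clamp_min
        self.clamp_max = clamp_max
        self.random_shift = random_shift
        self.random_scale = random_scale

Example #6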
 def get(self, **kwargs):
     """
     Returns the sitk transform based on the given parameters.
     :param kwargs: Must contain either 'image', or 'input_size' and 'input_spacing', which define the input image physical space.
     :return: The sitk.AffineTransform().
     """
     # TODO check, if direction or origin are really needed
     input_size, input_spacing, input_direction, input_origin = self.get_image_size_spacing_direction_origin(
         **kwargs)
     assert np.allclose(
         input_direction,
         np.eye(self.dim).flatten()
     ), 'this transformation only works for eye direction, is: ' + str(input_direction)
     assert np.allclose(
         input_origin, np.zeros(self.dim)
     ), 'this transformation only works for zeros origin, is: ' + str(input_origin)
     max_translation = [
         input_size[i] * input_spacing[i] - self.remove_border[i]
         for i in range(self.dim)
     ]
     current_offset = [
         max_translation[i] *
         float_uniform(-self.random_factor[i], self.random_factor[i])
         for i in range(len(self.random_factor))
     ]
     return self.get_translate_transform(self.dim, current_offset)
Example #7
 def get_random_move(self, image):
     """
     Calculates a list of x/y offsets for a given image.
     :param image: The image.
     :return: List of x/y offset tuples.
     """
     random_move_x = float_uniform(-self.image_size[0] * self.random_move, self.image_size[0] * self.random_move)
     random_move_y = float_uniform(-self.image_size[1] * self.random_move, self.image_size[1] * self.random_move)
     frame_axis = self.video_frame_stack_axis
     random_moves = []
     for i in range(image.shape[frame_axis]):
         displacement_x = (i / image.shape[frame_axis]) * random_move_x
         displacement_y = (i / image.shape[frame_axis]) * random_move_y
         displacement_x += float_uniform(-self.image_size[0] * self.random_jiggle, self.image_size[0] * self.random_jiggle)
         displacement_y += float_uniform(-self.image_size[1] * self.random_jiggle, self.image_size[1] * self.random_jiggle)
         random_moves.append((int(displacement_x), int(displacement_y)))
     return random_moves
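Several snippets on this page (including the next one) return self.get_translate_transform(...), which is not shown; a hedged sketch of what such a helper might look like for the sitk case (signature and body are assumptions, and note that some examples pass an explicit dim argument while others do not):

import SimpleITK as sitk

def get_translate_transform(dim, offset):
    # Assumed helper: build an affine transform that translates by the given
    # physical offset (one value per dimension).
    t = sitk.AffineTransform(dim)
    t.SetTranslation([float(o) for o in offset])
    return t

Example #8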
 def get(self, **kwargs):
     """
     Returns the sitk transform based on the given parameters.
     :param kwargs: Not used.
     :return: The sitk.AffineTransform().
     """
     current_offset = [float_uniform(-self.random_offset[i], self.random_offset[i])
                       for i in range(len(self.random_offset))]
     return self.get_translate_transform(self.dim, current_offset)
Example #9
 def get(self, **kwargs):
     """
     Returns the sitk transform based on the given parameters.
     :param kwargs: Not used.
     :return: The sitk.AffineTransform().
     """
     if self.dim == 2:
         self.current_angles = [float_uniform(-self.random_angles[0], self.random_angles[0])]
     elif self.dim == 3:
         # rotate by same random angle in each dimension
         if len(self.random_angles) == 1:
             angle = float_uniform(-self.random_angles[0], self.random_angles[0])
             self.current_angles = [angle] * self.dim
         else:
             # rotate by individual angle in each dimension
             self.current_angles = [float_uniform(-self.random_angles[i], self.random_angles[i])
                                    for i in range(self.dim)]
     return self.get_rotation_transform(self.dim, self.current_angles)
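get_rotation_transform is likewise not part of the snippet; a hedged sketch of how such a rotation could be assembled with SimpleITK (angle conventions and axis ordering are assumptions based on the comments above, not the library's definition):

import SimpleITK as sitk

def get_rotation_transform(dim, angles):
    # Assumed helper: compose in-plane rotations (angles in radians) into a
    # single affine transform.
    t = sitk.AffineTransform(dim)
    if dim == 2:
        t.Rotate(0, 1, angles[0])
    elif dim == 3:
        t.Rotate(1, 2, angles[0])  # rotation in the y-z plane
        t.Rotate(0, 2, angles[1])  # rotation in the x-z plane
        t.Rotate(0, 1, angles[2])  # rotation in the x-y plane
    return t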
Example #10
 def intensity_postprocessing_mr_random(self, image):
     """
     Intensity postprocessing for MR input. Random augmentation version.
     :param image: The np input image.
     :return: The processed image.
     """
     image = change_gamma_unnormalized(image, float_uniform(0.5, 1.5))
     image = normalize_robust(image, consideration_factors=(0.1, 0.1))
     return ShiftScaleClamp(random_shift=0.6,
                            random_scale=0.6,
                            clamp_min=-1.0)(image)
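change_gamma_unnormalized and normalize_robust are not shown on this page. A rough sketch of what the gamma helper might do, assuming it rescales the image to [0, 1], applies the gamma power and maps the result back to the original range (an assumption based on the name, not the library's implementation):

import numpy as np

def change_gamma_unnormalized(image, gamma):
    # Assumed behaviour: normalize to [0, 1], apply the gamma power, then
    # restore the original intensity range.
    min_value, max_value = float(np.min(image)), float(np.max(image))
    normalized = (image - min_value) / (max_value - min_value + 1e-8)
    return np.power(normalized, gamma) * (max_value - min_value) + min_value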
Example #11
 def intensity_postprocessing_ct_random(self, image):
     """
     Intensity postprocessing for CT input. Random augmentation version.
     :param image: The np input image.
     :return: The processed image.
     """
     if not self.normalize_zero_mean_unit_variance:
         random_lambda = float_uniform(0.9, 1.1)
         image = change_gamma_unnormalized(image, random_lambda)
         output = ShiftScaleClamp(shift=0,
                                  scale=1 / 2048,
                                  random_shift=self.random_intensity_shift,
                                  random_scale=self.random_intensity_scale,
                                  clamp_min=-1.0,
                                  clamp_max=1.0)(image)
     else:
         random_lambda = float_uniform(0.9, 1.1)
         image = change_gamma_unnormalized(image, random_lambda)
         output = normalize_zero_mean_unit_variance(image)
     return output
Example #12
    def all_generators_post_processing_random_np(self, generators_dict):
        """
        Randomly augments images in the generators_dict according to the parameters.
        :param generators_dict: The generators_dict of np arrays.
        :return: The generators_dict with randomly moved np arrays.
        """
        image = generators_dict['image']
        frame_axis = self.video_frame_stack_axis
        random_move_x = float_uniform(-self.image_size[0] * self.random_move,
                                      self.image_size[0] * self.random_move)
        random_move_y = float_uniform(-self.image_size[1] * self.random_move,
                                      self.image_size[1] * self.random_move)
        for i in range(image.shape[frame_axis]):
            displacement_x = (i / image.shape[frame_axis]) * random_move_x
            displacement_y = (i / image.shape[frame_axis]) * random_move_y
            displacement_x += float_uniform(
                -self.image_size[0] * self.random_jiggle,
                self.image_size[0] * self.random_jiggle)
            displacement_y += float_uniform(
                -self.image_size[1] * self.random_jiggle,
                self.image_size[1] * self.random_jiggle)
            displacement_x = int(displacement_x)
            displacement_y = int(displacement_y)
            if displacement_x == 0 and displacement_y == 0:
                continue
            for key in generators_dict.keys():
                if len(generators_dict[key].shape) == 4:
                    if key == 'image':
                        generators_dict[key][:, i:i + 1, :, :] = roll_with_pad(
                            generators_dict[key][:, i:i + 1, :, :],
                            [0, 0, displacement_y, displacement_x],
                            mode='reflect')
                    else:
                        generators_dict[key][:, i:i + 1, :, :] = roll_with_pad(
                            generators_dict[key][:, i:i + 1, :, :],
                            [0, 0, displacement_y, displacement_x],
                            mode='constant')

        return generators_dict
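roll_with_pad is another helper that is not shown; a rough sketch of the behaviour suggested by the call sites above, assuming a positive shift moves content towards higher indices and the exposed border is filled according to mode (an assumption, not the library's code):

import numpy as np

def roll_with_pad(array, shifts, mode='constant'):
    # Assumed helper: pad on the side the content moves away from, then crop
    # back to the original shape, so the vacated border is filled per `mode`.
    pad_width = [(max(s, 0), max(-s, 0)) for s in shifts]
    padded = np.pad(array, pad_width, mode=mode)
    slices = tuple(slice(max(-s, 0), max(-s, 0) + size)
                   for s, size in zip(shifts, array.shape))
    return padded[slices]

Example #13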
    def get(self, **kwargs):
        """
        Returns the sitk transform based on the given parameters.
        :param kwargs: Must contain either 'image', or 'input_size' and 'input_spacing', which define the input image physical space.
        :return: The sitk.AffineTransform().
        """
        input_size, input_spacing = self.get_image_size_spacing(**kwargs)
        remove_border = self.remove_border
        if remove_border is None:
            remove_border = [0] * self.dim

        current_offset = [(input_size[i] * input_spacing[i] - remove_border[i]) * float_uniform(-self.random_factor[i], self.random_factor[i])
                          for i in range(len(self.random_factor))]
        return self.get_translate_transform(self.dim, current_offset)
Example #14
 def get(self, **kwargs):
     """
     Returns the sitk transform based on the given parameters.
     :param kwargs: Not used.
     :return: The sitk.AffineTransform().
     """
     scale = 1.0 + float_uniform(-self.random_scale, self.random_scale)
     current_scale = []
     for i in range(self.dim):
         if i in self.ignore_dim:
             current_scale.append(1.0)
         else:
             current_scale.append(scale)
     return self.get_scale_transform(self.dim, current_scale)
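The affine get() methods above and below each return an sitk transform that still has to be applied to an image; a hedged sketch of chaining two such transforms and resampling with SimpleITK (sitk.CompositeTransform requires SimpleITK 2.x; the dummy image and transform values are purely illustrative):

import SimpleITK as sitk

# Illustrative only: a dummy 3D image plus a translation and a rotation.
image = sitk.Image(64, 64, 64, sitk.sitkFloat32)
translation = sitk.AffineTransform(3)
translation.SetTranslation([5.0, 0.0, 0.0])
rotation = sitk.AffineTransform(3)
rotation.Rotate(0, 1, 0.1)

# Chain the transforms and resample the image through the composition.
composite = sitk.CompositeTransform(3)
composite.AddTransform(translation)
composite.AddTransform(rotation)
resampled = sitk.Resample(image, composite, sitk.sitkLinear, 0.0)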
Example #15
 def get(self, **kwargs):
     """
     Apply a random rotation transform to an input image.
     
     :param input_image: ITK image
         the input image
     :param random_angles: float, list of float
         random rotation angle ranges (in radians) for each dimension
     :param kwargs:
         chain: if True returns the transform instead of the output image (default=False)
     :return:
     """
     if self.dim == 2:
         self.current_angles = [float_uniform(-self.random_angles[0], self.random_angles[0])]
     elif self.dim == 3:
         # rotate by same random angle in each dimension
         if len(self.random_angles) == 1:
             angle = float_uniform(-self.random_angles[0], self.random_angles[0])
             self.current_angles = [angle] * self.dim
         else:
             # rotate by individual angle in each dimension
             self.current_angles = [float_uniform(-self.random_angles[i], self.random_angles[i])
                                    for i in range(self.dim)]
     return self.get_rotation_transform(self.current_angles)
Example #16
 def get(self, **kwargs):
     """
     Returns the sitk transform based on the given parameters.
     :param kwargs: Must contain either 'image', or 'input_size' and 'input_spacing', which define the input image physical space.
     :return: The sitk.AffineTransform().
     """
     # TODO check, if direction or origin are needed
     # TODO right now it only works when direction is np.eye and origin is np.zeros
     input_size, input_spacing, input_direction, input_origin = self.get_image_size_spacing_direction_origin(
         **kwargs)
     current_offset = [
         (input_size[i] * input_spacing[i] - self.remove_border[i]) *
         float_uniform(-self.random_factor[i], self.random_factor[i])
         for i in range(len(self.random_factor))
     ]
     return self.get_translate_transform(self.dim, current_offset)
Example #17
 def postprocessing_random(self, image):
     """
     Performs random augmentations of a grayscale image. Augmentation consists of random gamma correction,
     random intensity shift/scale per video and per frame.
     :param image: The grayscale image to augment.
     :return: The augmented grayscale image.
     """
     random_lambda = float_uniform(0.6, 1.4)
     image = change_gamma_unnormalized(image, random_lambda)
     image = ShiftScaleClamp(random_shift=0.65, random_scale=0.65)(image)
     if len(image.shape) == 4:
         for i in range(image.shape[self.video_frame_stack_axis]):
             current_slice = [slice(None), slice(None)]
             current_slice.insert(self.video_frame_stack_axis, slice(i, i + 1))
             image[tuple(current_slice)] = ShiftScaleClamp(random_shift=0.1, random_scale=0.1)(image[tuple(current_slice)])
     return image
Example #18
    def get(self, **kwargs):
        """
        Deform the image randomly with a given set of parameters using ITK's BSplineTransform.

        :param input_image: ITK image
            the input image
        :param grid_nodes: list of ints
            the number of nodes per dimension on the output space
        :param deform_range: float, list of floats
            random deformation ranges (in mm)
        :param spline_order: int, default is same as image dimension
            the order of the b-spline
        :param output_size: list
            the output size of the image (required to compute transformations)
        :param direction: list
            the output direction of the image (transform matrix)
        :param kwargs:
            chain: if True returns the transform instead of the output image (default=False)
        :return:
        """
        input_image = kwargs.get('image')
        input_size = input_image.GetSize()
        input_spacing = input_image.GetSpacing()

        origin = [
            -input_size[i] * input_spacing[i] * 0.5 for i in range(self.dim)
        ]
        physical_dimensions = [
            input_size[i] * input_spacing[i] for i in range(self.dim)
        ]

        # get the transform params
        current_transformation = self.get_deformation_transform(
            self.grid_nodes, origin, physical_dimensions, self.spline_order)

        # define the displacement range in mm per control point
        # modify the parameters
        deform_params = [
            float_uniform(-self.deformation_value, self.deformation_value)
            for _ in current_transformation.GetParameters()
        ]
        # set them back to the transform
        current_transformation.SetParameters(deform_params)
        # output spacing is input spacing: --> assumed (1, 1[, 1])!

        return current_transformation
Example #19
    def get(self, **kwargs):
        """
        Apply a random translation in each dimension specified by the random offset.
        The offset is drawn from a uniform distribution 1 + [-random_offset, random_offset].

        :param input_image: ITK image
        :param random_offset: float, list of floats
            ranges for uniform random offset in each dimension
        :param kwargs:
            chain: if True returns the transform instead of the output image (default=False)
        :return:
        """
        # translate by individual offset in each dimension
        current_offset = [
            1.0 + float_uniform(-self.random_offset[i], self.random_offset[i])
            for i in range(len(self.random_offset))
        ]
        return self.get_translate_transform(current_offset)
 def get(self, **kwargs):
     """
     Returns the sitk transform based on the given parameters.
     :param kwargs: Must contain 'start' and 'extent' which define the bounding box in physical coordinates.
     :return: The sitk.AffineTransform().
     """
     output_size = kwargs.get('output_size', self.output_size)
     output_spacing = kwargs.get('output_spacing', self.output_spacing)
     extent = kwargs.get('extent', self.output_spacing)
     max_translation = [
         extent[i] - output_size[i] * output_spacing[i]
         for i in range(self.dim)
     ]
     current_offset = [
         np.maximum(0.0, max_translation[i]) * float_uniform(-0.5, 0.5)
         for i in range(self.dim)
     ]
     return self.get_translate_transform(self.dim, current_offset)
 def get(self, **kwargs):
     """
     Returns the sitk transform based on the given parameters.
     :param kwargs: Must contain either 'image', or 'input_size' and 'input_spacing', which define the input image physical space.
     :return: The sitk.AffineTransform().
     """
     output_size = kwargs.get('output_size', self.output_size)
     output_spacing = kwargs.get('output_spacing', self.output_spacing)
     # TODO check, if direction or origin are really needed
     input_size, input_spacing, input_direction, input_origin = self.get_image_size_spacing_direction_origin(
         **kwargs)
     assert np.allclose(
         input_direction,
         np.eye(self.dim).flatten()
     ), 'this transformation only works for eye direction, is: ' + str(input_direction)
     max_translation = [
         input_size[i] * input_spacing[i] -
         output_size[i] * output_spacing[i] for i in range(self.dim)
     ]
     current_offset = [
         np.maximum(0.0, max_translation[i]) * float_uniform(-0.5, 0.5)
         for i in range(self.dim)
     ]
     return self.get_translate_transform(self.dim, current_offset)
 def update(self, **kwargs):
     if self.random_shift is not None:
         self.current_random_shift = random.float_uniform(-self.random_shift, self.random_shift)
     if self.random_scale is not None:
         self.current_random_scale = 1 + random.float_uniform(-self.random_scale, self.random_scale)
 def rotate_all_random(self):
     for i in range(self.dim):
         for j in range(i + 1, self.dim):
             plane = Plane(i, j, float_uniform(0, 2 * math.pi))
             self.planes.append(plane)
    def data_generators(self, iterator, datasources, transformation,
                        image_post_processing,
                        random_translation_single_landmark, image_size):
        """
        Returns the data generators that process one input. See datasources() for dict values.
        :param datasources: datasources dict.
        :param transformation: transformation.
        :param image_post_processing: The np postprocessing function for the image data generator.
        :return: A dict of data generators.
        """
        generators_dict = {}
        generators_dict['image'] = ImageGenerator(
            self.dim,
            image_size,
            self.image_spacing,
            interpolator='linear',
            post_processing_np=image_post_processing,
            data_format=self.data_format,
            resample_default_pixel_value=self.image_default_pixel_value,
            name='image',
            parents=[datasources['image'], transformation])
        if self.generate_landmark_mask:
            generators_dict['landmark_mask'] = ImageGenerator(
                self.dim,
                image_size,
                self.image_spacing,
                interpolator='nearest',
                data_format=self.data_format,
                resample_default_pixel_value=0,
                name='landmark_mask',
                parents=[datasources['landmark_mask'], transformation])
        if self.generate_labels or self.generate_single_vertebrae:
            generators_dict['labels'] = ImageGenerator(
                self.dim,
                image_size,
                self.image_spacing,
                interpolator='nearest',
                post_processing_np=self.split_labels,
                data_format=self.data_format,
                name='labels',
                parents=[datasources['labels'], transformation])
        if self.generate_heatmaps or self.generate_spine_heatmap:
            generators_dict['heatmaps'] = LandmarkGeneratorHeatmap(
                self.dim,
                image_size,
                self.image_spacing,
                sigma=self.heatmap_sigma,
                scale_factor=1.0,
                normalize_center=True,
                data_format=self.data_format,
                name='heatmaps',
                parents=[datasources['landmarks'], transformation])
        if self.generate_landmarks:
            generators_dict['landmarks'] = LandmarkGenerator(
                self.dim,
                image_size,
                self.image_spacing,
                data_format=self.data_format,
                name='landmarks',
                parents=[datasources['landmarks'], transformation])
        if self.generate_single_vertebrae_heatmap:
            single_landmark = LambdaNode(
                lambda id_dict, landmarks: landmarks[int(id_dict[
                    'landmark_id']):int(id_dict['landmark_id']) + 1],
                name='single_landmark',
                parents=[iterator, datasources['landmarks']])
            if random_translation_single_landmark:
                single_landmark = LambdaNode(
                    lambda l: [
                        Landmark(
                            l[0].coords + float_uniform(
                                -self.random_translation_single_landmark, self.
                                random_translation_single_landmark, [self.dim
                                                                     ]), True)
                    ],
                    name='single_landmark_translation',
                    parents=[single_landmark])
            generators_dict['single_heatmap'] = LandmarkGeneratorHeatmap(
                self.dim,
                image_size,
                self.image_spacing,
                sigma=self.heatmap_sigma,
                scale_factor=1.0,
                normalize_center=True,
                data_format=self.data_format,
                name='single_heatmap',
                parents=[single_landmark, transformation])
        if self.generate_single_vertebrae:
            if self.data_format == 'channels_first':
                generators_dict['single_label'] = LambdaNode(
                    lambda id_dict, images: images[int(id_dict[
                        'landmark_id']) + 1:int(id_dict['landmark_id']) + 2,
                                                   ...],
                    name='single_label',
                    parents=[iterator, generators_dict['labels']])
            else:
                generators_dict['single_label'] = LambdaNode(
                    lambda id_dict, images: images[...,
                                                   int(id_dict['landmark_id'])
                                                   + 1:int(id_dict[
                                                       'landmark_id']) + 2],
                    name='single_label',
                    parents=[iterator, generators_dict['labels']])
        if self.generate_spine_heatmap:
            generators_dict['spine_heatmap'] = LambdaNode(
                lambda images: gaussian(np.sum(images,
                                               axis=0 if self.data_format ==
                                               'channels_first' else -1,
                                               keepdims=True),
                                        sigma=self.spine_heatmap_sigma),
                name='spine_heatmap',
                parents=[generators_dict['heatmaps']])

        return generators_dict
    def data_generators(self, iterator, datasources, transformation, image_post_processing, random_translation_single_landmark, image_size, crop=False):
        """
        Returns the data generators that process one input. See datasources() for dict values.
        :param datasources: datasources dict.
        :param transformation: transformation.
        :param image_post_processing: The np postprocessing function for the image data generator.
        :return: A dict of data generators.
        """
        generators_dict = {}
        kwparents = {'output_size': image_size}
        image_datasource = datasources['image'] if not crop else LambdaNode(self.landmark_based_crop, name='image_cropped', kwparents={'image': datasources['image'], 'landmarks': datasources['landmarks']})
        generators_dict['image'] = ImageGenerator(self.dim,
                                                  None,
                                                  self.image_spacing,
                                                  interpolator='linear',
                                                  post_processing_np=image_post_processing,
                                                  data_format=self.data_format,
                                                  resample_default_pixel_value=self.image_default_pixel_value,
                                                  np_pixel_type=self.output_image_type,
                                                  name='image',
                                                  parents=[image_datasource, transformation],
                                                  kwparents=kwparents)
        # generators_dict['image'] = ImageGenerator(self.dim,
        #                                           None,
        #                                           self.image_spacing,
        #                                           interpolator='linear',
        #                                           post_processing_np=image_post_processing,
        #                                           data_format=self.data_format,
        #                                           resample_default_pixel_value=self.image_default_pixel_value,
        #                                           np_pixel_type=self.output_image_type,
        #                                           name='image_cropped',
        #                                           parents=[LambdaNode(self.landmark_based_crop, name='image_cropped', kwparents={'image': datasources['image'], 'landmarks': datasources['landmarks']}), transformation],
        #                                           kwparents=kwparents)
        if self.generate_landmark_mask:
            generators_dict['landmark_mask'] = ImageGenerator(self.dim,
                                                              None,
                                                              self.image_spacing,
                                                              interpolator='nearest',
                                                              data_format=self.data_format,
                                                              resample_default_pixel_value=0,
                                                              name='landmark_mask',
                                                              parents=[datasources['landmark_mask'], transformation],
                                                              kwparents=kwparents)
        if self.generate_labels:
            generators_dict['labels'] = ImageGenerator(self.dim,
                                                       None,
                                                       self.image_spacing,
                                                       interpolator='nearest',
                                                       post_processing_np=self.split_labels,
                                                       data_format=self.data_format,
                                                       name='labels',
                                                       parents=[datasources['labels'], transformation],
                                                       kwparents=kwparents)
        if self.generate_heatmaps or self.generate_spine_heatmap:
            generators_dict['heatmaps'] = LandmarkGeneratorHeatmap(self.dim,
                                                                   None,
                                                                   self.image_spacing,
                                                                   sigma=self.heatmap_sigma,
                                                                   scale_factor=1.0,
                                                                   normalize_center=True,
                                                                   data_format=self.data_format,
                                                                   name='heatmaps',
                                                                   parents=[datasources['landmarks'], transformation],
                                                                   kwparents=kwparents)
        if self.generate_landmarks:
            generators_dict['landmarks'] = LandmarkGenerator(self.dim,
                                                             None,
                                                             self.image_spacing,
                                                             data_format=self.data_format,
                                                             name='landmarks',
                                                             parents=[datasources['landmarks'], transformation],
                                                             kwparents=kwparents)
        if self.generate_single_vertebrae_heatmap:
            single_landmark = LambdaNode(lambda id_dict, landmarks: landmarks[int(id_dict['landmark_id']):int(id_dict['landmark_id']) + 1],
                                         name='single_landmark',
                                         parents=[iterator, datasources['landmarks']])
            if random_translation_single_landmark:
                single_landmark = LambdaNode(lambda l: [Landmark(l[0].coords + float_uniform(-self.random_translation_single_landmark, self.random_translation_single_landmark, [self.dim]), True)],
                                             name='single_landmark_translation',
                                             parents=[single_landmark])
            generators_dict['single_heatmap'] = LandmarkGeneratorHeatmap(self.dim,
                                                                         None,
                                                                         self.image_spacing,
                                                                         sigma=self.single_heatmap_sigma,
                                                                         scale_factor=1.0,
                                                                         normalize_center=True,
                                                                         data_format=self.data_format,
                                                                         np_pixel_type=self.output_image_type,
                                                                         name='single_heatmap',
                                                                         parents=[single_landmark, transformation],
                                                                         kwparents=kwparents)
        if self.generate_single_vertebrae:
            if self.generate_labels:
                if self.data_format == 'channels_first':
                    generators_dict['single_label'] = LambdaNode(lambda id_dict, images: images[int(id_dict['landmark_id']) + 1:int(id_dict['landmark_id']) + 2, ...],
                                                                 name='single_label',
                                                                 parents=[iterator, generators_dict['labels']])
                else:
                    generators_dict['single_label'] = LambdaNode(lambda id_dict, images: images[..., int(id_dict['landmark_id']) + 1:int(id_dict['landmark_id']) + 2],
                                                                 name='single_label',
                                                                 parents=[iterator, generators_dict['labels']])
            else:
                labels_unsmoothed = ImageGenerator(self.dim,
                                                   None,
                                                   self.image_spacing,
                                                   interpolator='nearest',
                                                   post_processing_np=None,
                                                   data_format=self.data_format,
                                                   name='labels_unsmoothed',
                                                   parents=[datasources['labels'], transformation],
                                                   kwparents=kwparents)
                generators_dict['single_label'] = LambdaNode(lambda id_dict, labels: self.split_and_smooth_single_label(labels, int(id_dict['landmark_id'])),
                                                             name='single_label',
                                                             parents=[iterator, labels_unsmoothed])
        if self.generate_spine_heatmap:
            generators_dict['spine_heatmap'] = LambdaNode(lambda images: normalize(gaussian(np.sum(images, axis=0 if self.data_format == 'channels_first' else -1, keepdims=True), sigma=self.spine_heatmap_sigma), out_range=(0, 1)),
                                                          name='spine_heatmap',
                                                          parents=[generators_dict['heatmaps']])

        return generators_dict