Code Example #1
    def __call__(self, images):
        # input_dtypes = copy_dtypes_for_restore(images, force_list=True)
        result = images
        nb_images = len(images)
        # Generate new seeds (one per image)
        seeds = ia.current_random_state().randint(0, 10**6, (nb_images, ))
        rs_image = ia.new_random_state(seeds[0])
        per_channel = self.per_channel.draw_sample(random_state=rs_image)

        if per_channel == 1:
            nb_channels = images.shape[1]

            for c in range(nb_channels):
                samples = self.value.draw_samples(
                    (nb_images, 1, 1, 1),
                    random_state=rs_image).astype(np.float32)
                do_assert((samples >= 0).all())

                result[:, c:(c + 1),
                       ...] = F.add(images[:, c:(c + 1), ...],
                                    torch.cuda.FloatTensor(samples))

        else:
            samples = self.value.draw_samples(
                (nb_images, 1, 1, 1), random_state=rs_image).astype(np.float32)
            do_assert((samples >= 0).all())

            result = F.add(images, torch.cuda.FloatTensor(samples))

        # image = meta.clip_augmented_image_(image, 0, 255) # TODO make value range more flexible
        # image = meta.restore_augmented_image_dtype_(image, input_dtypes[i])

        return result
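
The samples drawn above have shape (nb_images, 1, 1, 1), so they broadcast over the channel and spatial axes of an (N, C, H, W) batch when F.add applies them. A minimal standalone sketch of that broadcasting, assuming F.add is an elementwise broadcast addition and using plain torch instead of the project's functional helpers:

import torch

# Batch of 4 RGB images in [0, 255].
images = torch.rand(4, 3, 8, 8) * 255

# One non-negative additive offset per image, shaped (N, 1, 1, 1) so it
# broadcasts over channels, height and width.
offsets = torch.tensor([10.0, 20.0, 0.0, 5.0]).view(4, 1, 1, 1)

# Every pixel of image i is shifted by offsets[i].
result = images + offsets
print(result.shape)  # torch.Size([4, 3, 8, 8])
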
Code Example #2
    def __call__(self, images):

        nb_images = len(images)  # images: (N, C, H, W), e.g. 120 x 3 x H x W
        seeds = ia.current_random_state().randint(0, 10**6, (nb_images, ))
        rs_image = ia.new_random_state(seeds[0])

        samples = self.p.draw_samples((nb_images, ), random_state=rs_image)
        samples_true_index = np.where(samples == 1)[0]
        samples_false_index = np.where(samples == 0)[0]

        # Handle the zero-size case: PyTorch does not accept zero-size index arrays

        if samples_true_index.size > 0:
            samples_true_index = torch.cuda.LongTensor(samples_true_index)
            images_to_transform = torch.index_select(images, 0,
                                                     samples_true_index)
            if samples_false_index.size > 0:

                # images_not_to_transform = torch.index_select(images, 0, torch.cuda.LongTensor(
                #     samples_false_index))

                transformed_imgs = self.transform(images_to_transform)
                images = images.index_copy_(0, samples_true_index,
                                            transformed_imgs)
                return images
                # return torch.cat((self.transform(images_to_transform), images_not_to_transform), 0)

            else:
                return self.transform(images)

        else:
            return images
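
This example applies a transform only to a Bernoulli-selected subset of the batch, using index_select to gather the chosen images and index_copy_ to write the transformed versions back, while guarding against zero-size index tensors. A minimal standalone sketch of the same pattern, with a hypothetical apply_to_random_subset helper and a simple brightness boost standing in for self.transform:

import numpy as np
import torch

def apply_to_random_subset(images, transform, p=0.5, rng=None):
    # images: (N, C, H, W) tensor; transform: callable operating on a batch.
    rng = rng if rng is not None else np.random.RandomState()
    mask = rng.binomial(1, p, size=len(images))
    idx = np.where(mask == 1)[0]
    if idx.size == 0:
        # Nothing selected: return early to avoid zero-size index tensors.
        return images
    idx_t = torch.from_numpy(idx).long().to(images.device)
    selected = torch.index_select(images, 0, idx_t)
    # Write transformed images back into their original batch positions.
    return images.index_copy_(0, idx_t, transform(selected))

batch = torch.rand(6, 3, 4, 4)
out = apply_to_random_subset(batch, lambda x: x * 1.5, p=0.5)
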
Code Example #3
    def __call__(self, images):

        result = images
        nb_images = len(images)
        # Generate new seeds (one per image)
        seeds = ia.current_random_state().randint(0, 10**6, (nb_images, ))
        rs_image = ia.new_random_state(seeds[0])
        per_channel = self.per_channel.draw_sample(random_state=rs_image)

        if per_channel == 1:
            nb_channels = images.shape[1]  # assuming (N, C, H, W) layout

            for c in range(nb_channels):
                alphas = self.alpha.draw_samples(
                    (nb_images, 1, 1, 1),
                    random_state=rs_image).astype(np.float32)
                do_assert((alphas >= 0).all())

                result[:, c:(c + 1),
                       ...] = F.contrast(images[:, c:(c + 1), ...],
                                         torch.cuda.FloatTensor(alphas))

        else:
            alphas = self.alpha.draw_samples(
                (nb_images, 1, 1, 1), random_state=rs_image).astype(np.float32)
            do_assert((alphas >= 0).all())

            result = F.contrast(images, torch.cuda.FloatTensor(alphas))
        return result
Code Example #4
    def __call__(self, images):

        # Blend images toward grayscale by a per-image alpha factor

        nb_images = len(images)
        # Generate new seeds (one per image)
        seeds = ia.current_random_state().randint(0, 10**6, (nb_images, ))
        rs_image = ia.new_random_state(seeds[0])

        samples = self.alpha.draw_samples(
            (nb_images, 1, 1, 1), random_state=rs_image).astype(np.float32)
        do_assert((samples >= 0).all())
        result = F.grayscale(images, torch.cuda.FloatTensor(samples))

        return result
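
F.grayscale belongs to the project's own functional module, so its exact behavior is an assumption here; by analogy with imgaug's Grayscale augmenter, alpha=0 leaves an image unchanged and alpha=1 makes it fully grayscale. A minimal sketch of such a blend for an (N, 3, H, W) RGB batch, using the common luminance weights:

import torch

def grayscale_blend(images, alphas):
    # images: (N, 3, H, W) RGB batch; alphas: (N, 1, 1, 1) blend factors in [0, 1].
    weights = torch.tensor([0.299, 0.587, 0.114],
                           device=images.device).view(1, 3, 1, 1)
    # Per-pixel luminance, repeated over the three channels.
    gray = (images * weights).sum(dim=1, keepdim=True).expand_as(images)
    return alphas * gray + (1.0 - alphas) * images

batch = torch.rand(2, 3, 8, 8)
alphas = torch.tensor([0.0, 1.0]).view(2, 1, 1, 1)  # image 0 unchanged, image 1 fully gray
blended = grayscale_blend(batch, alphas)
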
Code Example #5
    def __call__(self, images):

        result = images
        nb_images = len(images)

        nb_channels, height, width = images[0].shape

        seeds = ia.current_random_state().randint(0, 10**6, (nb_images, ))
        rs_image = ia.new_random_state(seeds[0])

        samples = self.sigma.draw_samples((nb_images, ), random_state=rs_image)

        # note that while gaussian_filter can be applied to all channels
        # at the same time, that should not be done here, because then
        # the blurring would also happen across channels (e.g. red
        # values might be mixed with blue values in RGB)
        for c in range(nb_channels):

            result[:, c:(c + 1), ...] = F.blur(result[:, c:(c + 1), ...],
                                               samples)

        return result
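
The comment above refers to scipy's gaussian_filter: applied to a whole (C, H, W) array with a scalar sigma it would also smooth along the channel axis, mixing e.g. red and blue values, which is why the loop blurs one channel at a time. A small NumPy/SciPy sketch of that per-channel approach, independent of the project's F.blur helper:

import numpy as np
from scipy import ndimage

def blur_per_channel(image, sigma):
    # image: (C, H, W) array; blur each channel independently so that
    # values are never mixed across channels.
    out = np.empty_like(image)
    for c in range(image.shape[0]):
        out[c] = ndimage.gaussian_filter(image[c], sigma=sigma)
    return out

img = np.random.rand(3, 32, 32).astype(np.float32)
blurred = blur_per_channel(img, sigma=1.5)

Equivalently, a per-axis sigma of (0, sigma, sigma) tells gaussian_filter to smooth only the spatial axes, which achieves the same result without the explicit loop.
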
Code Example #6
File: parameters.py  Project: zx1425445679/CNN
    def draw_samples(self, size, random_state=None):
        random_state = random_state if random_state is not None else ia.current_random_state()
        return self._draw_samples(size, random_state)
Code Example #7
File: parameters.py  Project: aleju/ImageAugmenter
    def draw_samples(self, size, random_state=None):
        random_state = random_state if random_state is not None else ia.current_random_state()
        return self._draw_samples(size, random_state)
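
Both snippets show the same delegation pattern: draw_samples fills in a default random state when none is given and then defers to _draw_samples, which each concrete parameter implements. A minimal sketch of a parameter class built on that pattern, with an illustrative Uniform parameter and NumPy standing in for ia.current_random_state:

import numpy as np

class StochasticParameter:
    def draw_samples(self, size, random_state=None):
        # Fall back to a default random state when none is supplied.
        random_state = random_state if random_state is not None else np.random.RandomState()
        return self._draw_samples(size, random_state)

    def _draw_samples(self, size, random_state):
        raise NotImplementedError()

class Uniform(StochasticParameter):
    def __init__(self, low, high):
        self.low, self.high = low, high

    def _draw_samples(self, size, random_state):
        return random_state.uniform(self.low, self.high, size)

samples = Uniform(0.0, 1.0).draw_samples((4,), random_state=np.random.RandomState(42))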