Example #1
def generate_sequential_augmenter(width: int = 512,
                                  height: int = 512) -> iaa.Sequential:
    seq = iaa.Sequential(
        [
            # Small gaussian blur with random sigma between 0 and 0.5.
            # But we only blur about 50% of all images.
            iaa.Sometimes(0.5, iaa.GaussianBlur(sigma=(0, 0.5))),
            # Apply affine transformations to each image.
            # Scale/zoom them, rotate them and shear them.
            iaa.Sometimes(
                0.5,
                iaa.Affine(scale={
                    "x": (0.8, 1.2),
                    "y": (0.8, 1.2)
                },
                           rotate=(-25, 25),
                           shear=(-8, 8))),
            # Crop to a fixed size; the crop position is sampled
            # uniformly while staying 10% away from every edge.
            iaa.size.CropToFixedSize(width,
                                     height,
                                     position=(iap.Uniform(
                                         0.1, 0.9), iap.Uniform(0.1, 0.9))),
            iaa.Fliplr(0.5),  # horizontal flips
            iaa.Flipud(0.5),  # vertical flips
            # Strengthen or weaken the contrast in each image.
            iaa.LinearContrast((0.85, 1.15)),
            # Make some images brighter and some darker.
            # In 20% of all cases, we sample the multiplier once per channel,
            # which can end up changing the color of the images.
            iaa.Multiply((0.9, 1.1), per_channel=0.2)
        ],
        random_order=False)  # apply augmenters in the explicit order
    return seq
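A minimal usage sketch for the augmenter above (NumPy and imgaug imports are assumed at module level; the 600x600 batch is synthetic and only chosen to be larger than the 512x512 crop):

import numpy as np
import imgaug.augmenters as iaa
import imgaug.parameters as iap

# Hypothetical batch of 16 random RGB images.
images = np.random.randint(0, 255, (16, 600, 600, 3), dtype=np.uint8)
seq = generate_sequential_augmenter(width=512, height=512)
images_aug = seq.augment_images(images)  # every augmented image is 512x512x3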
Example #2
def example_unusual_distributions():
    print("Example: Unusual Distributions")
    from imgaug import augmenters as iaa
    from imgaug import parameters as iap
    import numpy as np
    images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)

    # Blur by a value sigma which is sampled from a uniform distribution
    # of range 0.1 <= x < 3.0.
    # The convenience shortcut for this is: iaa.GaussianBlur((0.1, 3.0))
    blurer = iaa.GaussianBlur(iap.Uniform(0.1, 3.0))
    images_aug = blurer.augment_images(images)

    # Blur by a value sigma which is sampled from a normal distribution N(1.0, 0.1),
    # i.e. sample a value that is usually around 1.0.
    # Clip the resulting value so that it never gets below 0.1 or above 3.0.
    blurer = iaa.GaussianBlur(iap.Clip(iap.Normal(1.0, 0.1), 0.1, 3.0))
    images_aug = blurer.augment_images(images)

    # Same again, but this time the mean of the normal distribution is not constant,
    # but comes itself from a uniform distribution between 0.5 and 1.5.
    blurer = iaa.GaussianBlur(
        iap.Clip(iap.Normal(iap.Uniform(0.5, 1.5), 0.1), 0.1, 3.0))
    images_aug = blurer.augment_images(images)

    # Use for sigma one of exactly three allowed values: 0.5, 1.0 or 1.5.
    blurer = iaa.GaussianBlur(iap.Choice([0.5, 1.0, 1.5]))
    images_aug = blurer.augment_images(images)

    # Sample sigma from a discrete uniform distribution of range 1 <= sigma <= 5,
    # i.e. sigma will have any of the following values: 1, 2, 3, 4, 5.
    blurer = iaa.GaussianBlur(iap.DiscreteUniform(1, 5))
    images_aug = blurer.augment_images(images)
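The stochastic parameters used above can also be sampled directly, which is a quick way to sanity-check a distribution before wiring it into an augmenter (a small sketch; the sample count is arbitrary):

from imgaug import parameters as iap

sigma_param = iap.Clip(iap.Normal(1.0, 0.1), 0.1, 3.0)
samples = sigma_param.draw_samples(10000)  # ndarray of 10000 sigma values
print(samples.min(), samples.mean(), samples.max())  # stays within [0.1, 3.0], mean near 1.0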
Example #3
def YOLO():
    """
    Data augmentation model for YOLOv3 training
    """
    return iaa.Sequential([
        iaa.KeepSizeByResize(
            iaa.Affine(
                scale=iap.Normal(1, 0.125),
                translate_percent=0.1,
                cval=128,
            )),
        iaa.Fliplr(0.5),
        iaa.Resize({
            "height": iap.Normal(1, 0.1),
            "width": iap.Normal(1, 0.1)
        }),
        iaa.Resize({
            "longer-side": 416,
            "shorter-side": "keep-aspect-ratio"
        }),
        iaa.PadToFixedSize(416, 416, pad_cval=128),
        iaa.MultiplyHueAndSaturation(mul_hue=iap.Uniform(0, 2),
                                     mul_saturation=iap.Uniform(1 / 1.5, 1.5)),
        iaa.AssertShape((None, 416, 416, 3)),
    ])
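One way the YOLO() pipeline might be applied to an image together with its boxes (a sketch, assuming imgaug >= 0.4 so the augmenter is callable; the image and box coordinates are made up):

import numpy as np
import imgaug as ia

image = np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)
boxes = ia.BoundingBoxesOnImage(
    [ia.BoundingBox(x1=100, y1=80, x2=300, y2=260, label="object")],
    shape=image.shape)

aug = YOLO()
image_aug, boxes_aug = aug(image=image, bounding_boxes=boxes)
# the trailing AssertShape guarantees a 416x416x3 result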
Example #4
def someAug():
    bg = iap.Uniform(16, 18)
    #Creating a series of augmentations
    shearXY = iaa.Sequential([
        iaa.ShearY(iap.Uniform(-10, 10), cval=bg),
        iaa.ShearX(iap.Uniform(-10, 10), cval=bg)
    ])
    rotate = iaa.Rotate(rotate=iap.Choice([-30, -15, 15, 30],
                                          p=[0.25, 0.25, 0.25, 0.25]),
                        cval=bg)

    pwAff = iaa.PiecewiseAffine(scale=(0.01, 0.06), cval=bg)

    affine = iaa.Affine(scale={
        "x": iap.Uniform(1.1, 1.2),
        "y": iap.Uniform(1.1, 1.2)
    },
                        cval=bg)

    noise = iaa.AdditiveGaussianNoise(loc=0, scale=(0, 0.025 * 255))
    #Using SomeOf to randomly select some augmentations
    someAug = iaa.SomeOf(iap.Choice([2, 3, 4], p=[1 / 3, 1 / 3, 1 / 3]),
                         [affine, shearXY, pwAff, rotate, noise],
                         random_order=True)
    return someAug
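The composite augmenter returned by someAug() can be used like any other imgaug augmenter (a sketch; the synthetic image stands in for whatever data the constant background value of 16-18 was tuned for):

import numpy as np

aug = someAug()
image = np.random.randint(0, 255, (128, 128, 3), dtype=np.uint8)
image_aug = aug.augment_image(image)  # applies 2-4 of the listed augmenters in random order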
Example #5
def augment(img, steering_angle):
  # Flip - mirror image
  if random.random() > 0.5:
    img = img[:, ::-1, :]
    steering_angle = -steering_angle
  # Blur
  blurer = iaa.GaussianBlur(iap.Uniform(0.1, 1.0))
  img = blurer.augment_image(img)
  #shuffle
  ColorShuffle = iaa.ChannelShuffle(p=0.7)
  img = ColorShuffle.augment_image(img)
  #SuperPixels
  superpixel = iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))
  img = superpixel.augment_image(img)
  #Fog
  Clouds = iaa.Clouds()
  img = Clouds.augment_image(img)
  #Snowflakes
  # Snowflakes = iaa.Snowflakes(flake_size=(0.1, 0.4), speed=(0.01, 0.05))
  # img = Snowflakes.augment_image(img)
  #Translate
  tx = random.randint(-20, 20)
  translater = iaa.Affine(translate_px={"x": tx}, mode='edge')
  img = translater.augment_image(img)
  steering_angle += tx * 0.02
  
  return img, steering_angle
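A sketch of how this steering-aware augmentation might be called for one training sample (cv2 and the file name are assumptions; any HxWx3 uint8 frame works, and the function relies on module-level random/iaa/iap imports):

import cv2

img = cv2.imread("frame_0001.png")  # hypothetical camera frame
steering_angle = 0.10
img_aug, steering_aug = augment(img, steering_angle)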
Example #6
    def cpu_augment(self, imgs, boxes):
        # for bx in boxes:
        #     self.assert_bboxes(bx)
        ia_bb = []
        for n in range(len(imgs)):
            c_boxes = []
            for i in boxes[n]:
                try:
                    c_boxes.append(
                        ia.BoundingBox(x1=i[0], y1=i[1], x2=i[2], y2=i[3]))
                except AssertionError:
                    print('Assertion Error: ', i)
            ia_bb.append(ia.BoundingBoxesOnImage(c_boxes, shape=imgs[n].shape))

        seq = iaa.Sequential([
            iaa.Sometimes(0.5, iaa.AddElementwise((-20, 20), per_channel=1)),
            iaa.Sometimes(0.5,
                          iaa.AdditiveGaussianNoise(scale=(0, 0.10 * 255))),
            iaa.Sometimes(0.5, iaa.Multiply((0.75, 1.25), per_channel=1)),
            iaa.Sometimes(0.5, iaa.MultiplyElementwise((0.75, 1.25))),
            iaa.Sometimes(0.5, iaa.GaussianBlur(sigma=(0.0, 1.0))),
            iaa.Fliplr(0.5),
            iaa.Sometimes(
                0.95,
                iaa.SomeOf(1, [
                    iaa.CoarseDropout(p=(0.10, 0.25),
                                      size_percent=(0.25, 0.5)),
                    iaa.CoarseDropout(p=(0.0, 0.15), size_percent=(0.1, 0.25)),
                    iaa.Dropout(p=(0, 0.25)),
                    iaa.CoarseSaltAndPepper(p=(0, 0.25),
                                            size_percent=(0.1, 0.2))
                ])),
            iaa.Affine(scale=iap.Choice(
                [iap.Uniform(0.4, 1), iap.Uniform(1, 3)]),
                       rotate=(-180, 180))
        ])
        seq_det = seq.to_deterministic()
        image_b_aug = seq_det.augment_images(imgs)
        bbs_b_aug = seq_det.augment_bounding_boxes(ia_bb)
        bbs_b_aug = [
            b.remove_out_of_image().cut_out_of_image() for b in bbs_b_aug
        ]
        return image_b_aug, [
            np.array([self.bbox_r(j) for j in i.bounding_boxes])
            for i in bbs_b_aug
        ]
Example #7
 def __augmentation_operations(self):
     self.aug_ops = iaa.Sequential([
         self.__sometimes(iaa.Fliplr(1), 0.5),
         self.__sometimes(
             iaa.Affine(scale=iap.Uniform(1.0, 1.2).draw_samples(1)), 0.3),
         self.__sometimes(iaa.AdditiveGaussianNoise(scale=0.05 * 255), 0.2),
         self.__sometimes(
             iaa.OneOf([
                 iaa.CropAndPad(percent=(iap.Uniform(
                     0.0, 0.20).draw_samples(1)[0], iap.Uniform(
                         0.0, 0.20).draw_samples(1)[0]),
                                pad_mode=["constant"],
                                pad_cval=(0, 128)),
                 iaa.Crop(
                     percent=(iap.Uniform(0.0, 0.15).draw_samples(1)[0],
                              iap.Uniform(0.0, 0.15).draw_samples(1)[0]))
             ])),
         self.__sometimes(
             iaa.OneOf([
                 iaa.LogContrast(
                     gain=iap.Uniform(0.9, 1.2).draw_samples(1)),
                 iaa.GammaContrast(
                     gamma=iap.Uniform(1.5, 2.5).draw_samples(1))
             ]))
     ],
                                   random_order=True)
     return None
Example #8
    def run_augmentations(self, image, label, face):
        image, segmaps, bboxes = self.to_imgaug_format(image, label, face)
        img, segmaps, bbs = self.augmentations(image=image,
                                               segmentation_maps=segmaps,
                                               bounding_boxes=bboxes)
        bb = bbs[0]

        max_width = bb.width * 8
        min_width = bb.width * 256 / 144
        average = max_width * 0.65 + min_width * 0.35
        distr = iap.Uniform((min_width, average), (average, max_width))
        width = distr.draw_sample()
        height = width * 144 / 256

        x_min = bb.x2 - width
        x_max = bb.x1

        average = x_min * 0.5 + x_max * 0.5
        distr = iap.Uniform((x_min, average), (average, x_max))
        x = int(distr.draw_sample())

        y_min = bb.y2 - height
        y_max = bb.y1
        average = y_min * 0.3 + y_max * 0.7
        distr = iap.Uniform((y_min, average), (average, y_max))
        y = int(distr.draw_sample())

        x = int(x)
        y = int(y)
        height = int(height)
        width = int(width)

        def augment(img, x, y, height, width):
            img = np.pad(img,
                         ((max(0 - y, 0), max(y + height - image.shape[0], 0)),
                          (max(0 - x, 0), max(x + width - image.shape[1], 0)),
                          (0, 0)))
            x = max(x, 0)
            y = max(y, 0)
            return cv2.resize(img[y:y + height, x:x + width], (256, 144))

        img = augment(img, x, y, height, width)
        segmaps = augment(segmaps.arr.astype(np.uint8), x, y, height, width)
        return self.normalize_and_tensorize(img, segmaps)
Example #9
    def __init__(self,
                 target_size,
                 fill_color=127,
                 mode='letterbox',
                 border='constant',
                 random_state=None):
        import cv2
        super(Resize, self).__init__(random_state=random_state)
        self.target_size = None if target_size is None else np.array(
            target_size)
        self.mode = mode

        import imgaug.parameters as iap
        if fill_color == imgaug.ALL:
            self.fill_color = iap.Uniform(0, 255)
        else:
            self.fill_color = iap.handle_continuous_param(
                fill_color,
                "fill_color",
                value_range=None,
                tuple_to_uniform=True,
                list_to_choice=True)

        self._cv2_border_type_map = {
            'constant': cv2.BORDER_CONSTANT,
            'edge': cv2.BORDER_REPLICATE,
            'linear_ramp': None,
            'maximum': None,
            'mean': None,
            'median': None,
            'minimum': None,
            'reflect': cv2.BORDER_REFLECT_101,
            'symmetric': cv2.BORDER_REFLECT,
            'wrap': cv2.BORDER_WRAP,
            cv2.BORDER_CONSTANT: cv2.BORDER_CONSTANT,
            cv2.BORDER_REPLICATE: cv2.BORDER_REPLICATE,
            cv2.BORDER_REFLECT_101: cv2.BORDER_REFLECT_101,
            cv2.BORDER_REFLECT: cv2.BORDER_REFLECT
        }
        if isinstance(border, six.string_types):
            if border == imgaug.ALL:
                border = [
                    k for k, v in self._cv2_border_type_map.items()
                    if v is not None and isinstance(k, six.string_types)
                ]
            else:
                border = [border]
        if isinstance(border, (list, tuple)):
            from imgaug.parameters import Choice
            border = Choice(border)
        self.border = border
        assert self.mode == 'letterbox', 'thats all folks'
Example #10
 def __init__(self):
     self.fonts_dir = "./fonts"
     # self.fonts_list = ["arial.ttf", "huawenxihei.ttf", "huawensongti.ttf"]          # arial cannot render Chinese characters on images
     self.fonts_list = os.listdir(self.fonts_dir)
     self.augment = False
     self.seq = iaa.Sequential(
         [
             # iaa.PerspectiveTransform(scale=0.02, keep_size=True),
             iaa.SomeOf((0, 3), [
                 iaa.GaussianBlur(sigma=iap.Uniform(0, 0.01)),
                 iaa.Add((-20, 20)),
                 iaa.OneOf(
                     [iaa.AverageBlur(k=(0, 1)),
                      iaa.MedianBlur(k=(1, 3))])
             ])
         ],
         random_order=True)
Example #11
 def init_augmentations(self):
     if self.transform_probability > 0:
         augmentations = iaa.Sometimes(
             self.transform_probability,
             iaa.Sequential([
                 iaa.SomeOf((1, None), [
                     iaa.AddToHueAndSaturation(iap.Uniform(-20, 20),
                                               per_channel=True),
                     iaa.GaussianBlur(sigma=(0, 1.0)),
                     iaa.LinearContrast((0.75, 1.0)),
                     iaa.PiecewiseAffine(scale=(0.01, 0.02), mode='edge'),
                 ],
                            random_order=True)
             ]))
     else:
         augmentations = None
     return augmentations
Example #12
def make_optical_flow_augmenter_large(crop_size):
    h, w = crop_size
    alpha_parameter = iap.Uniform(0, 60)

    def augment(*args, **kwargs):
        alpha = alpha_parameter.draw_sample()
        sigma = alpha / 2.5
        augmenter = iaa.Sequential([
            iaa.Affine(
                scale=iap.Normal(loc=1, scale=0.1),
                translate_percent=iap.Normal(loc=0, scale=0.1),
                shear=iap.Normal(loc=0, scale=5),
                backend='cv2'
            ),
            iaa.Sometimes(0.3, iaa.ElasticTransformation(alpha=alpha, sigma=sigma))
        ], random_order=False)
        return augmenter(*args, **kwargs)
    return augment
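A possible call site for the factory above (a sketch; the frame pair is synthetic, and note that crop_size is unpacked but not otherwise used inside the returned closure):

import numpy as np

flow_aug = make_optical_flow_augmenter_large(crop_size=(256, 256))
frames = np.random.randint(0, 255, (2, 256, 256, 3), dtype=np.uint8)
frames_aug = flow_aug(images=frames)  # affine jitter plus occasional elastic transformation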
Example #13
    def __init__(self, dict_path, fonts_dir, bg_dir):
        self.dict_path, self.fonts_dir, self.bg_dir = dict_path, fonts_dir, bg_dir
        self.char_set, self.char_to_label, self.label_to_char = self.load_dict(dict_path)
        self.fonts_list = self.load_fonts(fonts_dir)
        self.bg_image_list = self.load_background_image(bg_dir)

        self.augment = False
        self.seq = iaa.Sequential([
            # iaa.PerspectiveTransform(scale=0.02, keep_size=True),
            iaa.SomeOf((0, 3), [
                iaa.GaussianBlur(sigma=iap.Uniform(0, 0.01)),
                iaa.Add((-20, 20)),
                iaa.OneOf([
                    iaa.AverageBlur(k=(0, 1)),
                    iaa.MedianBlur(k=(1, 3))
                ])
            ])
        ], random_order=True)
Example #14
def chapter_parameters_introduction():
    ia.seed(1)
    from imgaug import augmenters as iaa
    from imgaug import parameters as iap

    seq = iaa.Sequential([
        iaa.GaussianBlur(sigma=iap.Uniform(0.0, 1.0)),
        iaa.ContrastNormalization(
            iap.Choice([1.0, 1.5, 3.0], p=[0.5, 0.3, 0.2])),
        iaa.Affine(rotate=iap.Normal(0.0, 30),
                   translate_px=iap.RandomSign(iap.Poisson(3))),
        iaa.AddElementwise(iap.Discretize(
            (iap.Beta(0.5, 0.5) * 2 - 1.0) * 64)),
        iaa.Multiply(iap.Positive(iap.Normal(0.0, 0.1)) + 1.0)
    ])

    images = np.array([ia.quokka_square(size=(128, 128)) for i in range(16)])
    images_aug = [seq.augment_image(images[i]) for i in range(len(images))]
    save("parameters", "introduction.jpg", grid(images_aug, cols=4, rows=4))
Example #15
def example_probability_distributions_as_parameters():
    print("Example: Probability Distributions as Parameters")
    import numpy as np
    from imgaug import augmenters as iaa
    from imgaug import parameters as iap

    images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)

    # Blur by a value sigma which is sampled from a uniform distribution
    # of range 10.1 <= x < 13.0.
    # The convenience shortcut for this is: GaussianBlur((10.1, 13.0))
    blurer = iaa.GaussianBlur(10 + iap.Uniform(0.1, 3.0))
    images_aug = blurer(images=images)

    # Blur by a value sigma which is sampled from a gaussian distribution
    # N(1.0, 0.1), i.e. sample a value that is usually around 1.0.
    # Clip the resulting value so that it never gets below 0.1 or above 3.0.
    blurer = iaa.GaussianBlur(iap.Clip(iap.Normal(1.0, 0.1), 0.1, 3.0))
    images_aug = blurer(images=images)
Example #16
def generate_batch_augmented(X, y, batch_size=32, crop_size=130):
    while True:
        batch_X, batch_y = next(generate_batch(X, y, batch_size))
        seq = iaa.Sequential(
            [
                iaa.Fliplr(0.5),
                iaa.Flipud(0.5),
                iaa.Affine(
                    rotate=(-180, 180),  # rotate by -180 to +180 degrees
                    order=[3],  # use bicubic interpolation
                ),
                iaa.CropToFixedSize(crop_size, crop_size),
            ]
        )
        seq_X = iaa.Sometimes(0.5, [iaa.ReplaceElementwise(0.01, iap.Uniform(0, 2000))])

        if batch_X[0].ndim == 2:
            channels = 1
        else:
            channels = batch_X[0].shape[-1]
        batch_aug_X = np.empty((batch_size, crop_size, crop_size, channels))
        batch_aug_y = np.empty((batch_size, crop_size, crop_size, 1))
        for i in range(batch_size):
            seq_det = (
                seq.to_deterministic()
            )  # re-seed per sample so image and force map get identical transforms
            images_aug = seq_det.augment_image(batch_X[i])
            forces_aug = seq_det.augment_image(batch_y[i])
            images_aug = seq_X.augment_image(images_aug)
            if images_aug.ndim == 2:
                batch_aug_X[i] = np.expand_dims(images_aug, axis=-1)
            else:
                batch_aug_X[i] = images_aug

            if forces_aug.ndim == 2:
                batch_aug_y[i] = np.expand_dims(forces_aug, axis=-1)
            else:
                batch_aug_y[i] = forces_aug

        yield batch_aug_X, batch_aug_y
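A sketch of pulling one batch from this generator (X_train and y_train are placeholder arrays, and generate_batch is the author's helper, assumed to exist in the surrounding module):

gen = generate_batch_augmented(X_train, y_train, batch_size=32, crop_size=130)
batch_X, batch_y = next(gen)
print(batch_X.shape, batch_y.shape)  # (32, 130, 130, channels) and (32, 130, 130, 1)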
Example #17
def make_synthetic_prev_mask_complex_mask_augmenter(crop_size):
    h, w = crop_size

    return iaa.Sequential([
        iaa.Sometimes(0.5, iaa.Lambda(func_segmentation_maps=choose_random_objects_mask_augmenter)),
        iaa.Lambda(func_segmentation_maps=morph_close_mask_augmenter),
        iaa.Sometimes(0.3,
            # failed mask
            iaa.OneOf([
                iaa.TotalDropout(1.0),  # fill image
                iaa.Sequential([  # fail mask
                    iaa.OneOf([
                        iaa.Lambda(func_segmentation_maps=make_morph_operation_mask_augmenter(cv2.erode, min_coef=0.2, max_coef=0.5)),
                        iaa.Lambda(func_segmentation_maps=make_morph_operation_mask_augmenter(cv2.dilate, min_coef=0.2, max_coef=0.5)),
                    ]),
                    iaa.Affine(translate_percent=iap.Choice([iap.Uniform(-0.5, -0.2), iap.Uniform(0.2, 0.5)]))
                ])
            ]),

            # normal mask
            iaa.Sequential([
                iaa.Sometimes(0.1, iaa.OneOf([
                    iaa.Lambda(func_segmentation_maps=make_morph_operation_mask_augmenter(cv2.erode)),  # smaller mask
                    iaa.Lambda(func_segmentation_maps=make_morph_operation_mask_augmenter(cv2.dilate)),  # larger mask
                ])),
                iaa.Sometimes(1.0, iaa.Affine(
                    scale=iap.Normal(loc=1, scale=0.02),
                    translate_percent=iap.Normal(loc=0, scale=0.03),
                    shear=iap.Normal(loc=0, scale=1),
                    backend='cv2'
                )),
                iaa.Sometimes(0.1,
                    iaa.ElasticTransformation(alpha=2000, sigma=50)
                ),
                iaa.Sometimes(0.1,
                    iaa.PiecewiseAffine()
                )
            ])
        )
    ], random_order=False)
Example #18
def augment_image(im):

    arr = np.asarray(im, dtype=np.uint8)
    blurer = iaa.GaussianBlur(1 + iap.Uniform(0.1, 3.0))

    seq = iaa.Sequential([
        #iaa.Crop(px=(1, 16), keep_size=False),
        iaa.Fliplr(0.5),
        iaa.GaussianBlur(sigma=(0, 1.0)),
        iaa.ChangeColorTemperature((1100, 10000))
    ])

    #aug = iaa.Emboss(alpha=(0.0, 1.0), strength=(0.5, 1.5))
    #aug = iaa.EdgeDetect(alpha=(0.0, 1.0))
    #aug = iaa.ChangeColorTemperature((1100, 10000))

    aug_arr = seq(images=[arr])[0]
    im2 = Image.fromarray(aug_arr)
    #im2.show()
    #im.show()
    #Image.fromarray(np.hstack((np.array(im),np.array(im2)))).show()
    return im2
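A minimal sketch of calling this helper on a PIL image (the file names are hypothetical; PIL, numpy, iaa and iap are assumed at module level):

from PIL import Image

im = Image.open("sample.jpg")  # hypothetical input image
im_aug = augment_image(im)     # returns a new PIL image
im_aug.save("sample_aug.jpg")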
Example #19
 def init_augmentations(self):
     if self.transform_probability > 0 and self.use_imgaug:
         augmentations = iaa.Sometimes(
             self.transform_probability,
             iaa.Sequential([
                 iaa.SomeOf((1, None), [
                     iaa.AddToHueAndSaturation(iap.Uniform(-20, 20),
                                               per_channel=True),
                     iaa.GaussianBlur(sigma=(0, 1.0)),
                     iaa.LinearContrast((0.75, 1.0)),
                     iaa.PiecewiseAffine(scale=(0.01, 0.02), mode='edge'),
                 ],
                            random_order=True),
                 iaa.Resize(
                     {
                         "height": (16, self.image_size.height),
                         "width": "keep-aspect-ratio"
                     },
                     interpolation=imgaug.ALL),
             ]))
     else:
         augmentations = None
     return augmentations
Example #20
 def __init__(self):
     Generator.__init__(self)
     self.augment = False
     self.seq = iaa.Sequential(
         [
             # iaa.PerspectiveTransform(scale=0.02, keep_size=True),
             iaa.SomeOf(
                 (2, 4),
                 [  # apply 2 to 4 of the following augmenters each time
                     iaa.AdditiveGaussianNoise(
                         loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
                     iaa.Add((-5, 5)),
                     iaa.Sometimes(0.5, [iaa.Scale(2.0),
                                         iaa.Scale(0.5)]),
                     iaa.OneOf([
                         iaa.AverageBlur(k=(1, 3)),
                         iaa.MedianBlur(k=(1, 3)),
                         iaa.GaussianBlur(sigma=iap.Uniform(0.01, 0.05)),
                     ]),
                     iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5)
                 ])
         ],
         random_order=True)
Example #21
    def __init__(self, *args, **kwargs):
        self.image_size = kwargs.pop('image_size', None)
        self.image_mode = kwargs.pop('image_mode', 'RGB')
        self.transform_probability = kwargs.pop('transform_probability', 0)
        self.use_imgaug = kwargs.pop('use_imgaug', True)
        self.min_crop_ratio = kwargs.pop('min_crop_ratio', 0.6)
        self.max_crop_ratio = kwargs.pop('max_crop_ratio', 0.9)
        self.crop_always = kwargs.pop('crop_always', False)
        if self.transform_probability > 0 and self.use_imgaug:
            self.augmentations = iaa.Sometimes(
                self.transform_probability,
                iaa.SomeOf((0, None), [
                    iaa.Fliplr(1.0),
                    iaa.AddToHueAndSaturation(iap.Uniform(-20, 20),
                                              per_channel=True),
                    iaa.CropAndPad(percent=(-0.10, 0.10),
                                   pad_mode=["constant", "edge"]),
                ],
                           random_order=True))
        else:
            self.augmentations = None

        super().__init__(*args, **kwargs)
Example #22
 def __init__(self,
              pairs,
              root='.',
              dtype=numpy.float32,
              label_dtype=numpy.int32,
              image_size=None,
              image_mode='RGB',
              transform_probability=0,
              return_dummy_scores=True):
     if isinstance(pairs, six.string_types):
         pairs_path = pairs
         with open(pairs_path) as pairs_file:
             pairs = []
             reader = csv.reader(pairs_file, delimiter='\t')
             for i, pair in enumerate(reader):
                 pairs.append((pair[0], list(map(label_dtype, pair[1:]))))
     self.transform_probability = transform_probability
     if self.transform_probability > 0:
         self.augmentations = iaa.Sometimes(
             self.transform_probability,
             iaa.SomeOf((0, None), [
                 iaa.Fliplr(1.0),
                 iaa.AddToHueAndSaturation(iap.Uniform(-20, 20),
                                           per_channel=True),
                 iaa.ContrastNormalization((0.75, 1.0)),
                 iaa.Multiply((0.8, 1.2), per_channel=0.2),
             ],
                        random_order=True))
     else:
         self.augmentations = None
     self._pairs = pairs
     self._root = root
     self._dtype = dtype
     self._label_dtype = label_dtype
     self.image_size = image_size
     self.image_mode = image_mode
     self.return_dummy_scores = return_dummy_scores
Example #23
def stochastic():
    return iaa.Sequential([
        iaa.GaussianBlur(
            sigma=iap.Uniform(0.0, 1.0)
        ),
        iaa.ContrastNormalization(
            iap.Choice(
                [1.0, 1.5, 3.0],
                p=[0.5, 0.3, 0.2]
            )
        ),
        iaa.Affine(
            rotate=iap.Normal(0.0, 30),
            translate_px=iap.RandomSign(iap.Poisson(3))
        ),
        iaa.AddElementwise(
            iap.Discretize(
                (iap.Beta(0.5, 0.5) * 2 - 1.0) * 64
            )
        ),
        iaa.Multiply(
            iap.Positive(iap.Normal(0.0, 0.1)) + 1.0
        )
    ])
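The fully parameterized pipeline above is applied like any other augmenter (a small sketch with a synthetic image):

import numpy as np

aug = stochastic()
image = np.random.randint(0, 255, (128, 128, 3), dtype=np.uint8)
image_aug = aug.augment_image(image)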
Example #24
def chapter_parameters_arithmetic():
    ia.seed(1)

    # -----------------------
    # Add
    # -----------------------
    from imgaug import parameters as iap
    params = [
        iap.Uniform(0, 1) + 1,  # identical to: Add(Uniform(0, 1), 1)
        iap.Add(iap.Uniform(0, 1), iap.Choice([0, 1], p=[0.7, 0.3])),
        iap.Normal(0, 1) + iap.Uniform(-5.5, -5) + iap.Uniform(5, 5.5),
        iap.Normal(0, 1) + iap.Uniform(-7, 5) + iap.Poisson(3),
        iap.Add(iap.Normal(-3, 1), iap.Normal(3, 1)),
        iap.Add(iap.Normal(-3, 1), iap.Normal(3, 1), elementwise=True)
    ]
    gridarr = draw_distributions_grid(
        params,
        rows=2,
        sample_sizes=[  # (iterations, samples per iteration)
            (1000, 1000), (1000, 1000), (1000, 1000), (1000, 1000),
            (1, 100000), (1, 100000)
        ])
    save("parameters", "arithmetic_add.jpg", gridarr)

    # -----------------------
    # Multiply
    # -----------------------
    from imgaug import parameters as iap
    params = [
        iap.Uniform(0, 1) * 2,  # identical to: Multiply(Uniform(0, 1), 2)
        iap.Multiply(iap.Uniform(0, 1), iap.Choice([0, 1], p=[0.7, 0.3])),
        (iap.Normal(0, 1) * iap.Uniform(-5.5, -5)) * iap.Uniform(5, 5.5),
        (iap.Normal(0, 1) * iap.Uniform(-7, 5)) * iap.Poisson(3),
        iap.Multiply(iap.Normal(-3, 1), iap.Normal(3, 1)),
        iap.Multiply(iap.Normal(-3, 1), iap.Normal(3, 1), elementwise=True)
    ]
    gridarr = draw_distributions_grid(
        params,
        rows=2,
        sample_sizes=[  # (iterations, samples per iteration)
            (1000, 1000), (1000, 1000), (1000, 1000), (1000, 1000),
            (1, 100000), (1, 100000)
        ])
    save("parameters", "arithmetic_multiply.jpg", gridarr)

    # -----------------------
    # Divide
    # -----------------------
    from imgaug import parameters as iap
    params = [
        iap.Uniform(0, 1) / 2,  # identical to: Divide(Uniform(0, 1), 2)
        iap.Divide(iap.Uniform(0, 1), iap.Choice([0, 2], p=[0.7, 0.3])),
        (iap.Normal(0, 1) / iap.Uniform(-5.5, -5)) / iap.Uniform(5, 5.5),
        (iap.Normal(0, 1) * iap.Uniform(-7, 5)) / iap.Poisson(3),
        iap.Divide(iap.Normal(-3, 1), iap.Normal(3, 1)),
        iap.Divide(iap.Normal(-3, 1), iap.Normal(3, 1), elementwise=True)
    ]
    gridarr = draw_distributions_grid(
        params,
        rows=2,
        sample_sizes=[  # (iterations, samples per iteration)
            (1000, 1000), (1000, 1000), (1000, 1000), (1000, 1000),
            (1, 100000), (1, 100000)
        ])
    save("parameters", "arithmetic_divide.jpg", gridarr)

    # -----------------------
    # Power
    # -----------------------
    from imgaug import parameters as iap
    params = [
        iap.Uniform(0, 1)**2,  # identical to: Power(Uniform(0, 1), 2)
        iap.Clip(iap.Uniform(-1, 1)**iap.Normal(0, 1), -4, 4)
    ]
    gridarr = draw_distributions_grid(params, rows=1)
    save("parameters", "arithmetic_power.jpg", gridarr)
Example #25
        BoundingBoxesOnImage([
            BoundingBox(x1=bounding_boxes[0],
                        y1=bounding_boxes[1],
                        x2=bounding_boxes[2],
                        y2=bounding_boxes[3],
                        label=bounding_boxes[4])
        ],
                             shape=numpy_images[i].shape)
        for i, bounding_boxes in enumerate(list_boxes_denormalized)
    ]

    # create a sequence to apply transformations on images
    seq = iaa.Sequential(
        [
            iaa.Fliplr(0.5),  # horizontal flips
            iaa.Sometimes(0.5, iaa.GaussianBlur(sigma=iap.Uniform(
                0, 0.5))),  # But we only blur about 50% of all images.
            iaa.LinearContrast(
                (0.75,
                 1.5)),  # Strengthen or weaken the contrast in each image.
            iaa.Multiply((0.75, 1.2)),
        ],  # change luminosity
        random_order=True)  # apply augmenters in random order

    # set changes on image randomly and compute augmented images with corresponding labels
    seq_det = seq.to_deterministic()
    images_aug = seq_det.augment_images(numpy_images * ITERATION_NUMBER)
    bbs_aug = seq_det.augment_bounding_boxes(bbs * ITERATION_NUMBER)

    list_new_names = []

    # save augmented images
Example #26
def chapter_parameters_continuous():
    ia.seed(1)

    # -----------------------
    # Normal
    # -----------------------
    from imgaug import parameters as iap
    params = [
        iap.Normal(0, 1),
        iap.Normal(5, 3),
        iap.Normal(iap.Choice([-3, 3]), 1),
        iap.Normal(iap.Uniform(-3, 3), 1)
    ]
    gridarr = draw_distributions_grid(params)
    save("parameters", "continuous_normal.jpg", gridarr)

    # -----------------------
    # Laplace
    # -----------------------
    from imgaug import parameters as iap
    params = [
        iap.Laplace(0, 1),
        iap.Laplace(5, 3),
        iap.Laplace(iap.Choice([-3, 3]), 1),
        iap.Laplace(iap.Uniform(-3, 3), 1)
    ]
    gridarr = draw_distributions_grid(params)
    save("parameters", "continuous_laplace.jpg", gridarr)

    # -----------------------
    # ChiSquare
    # -----------------------
    from imgaug import parameters as iap
    params = [
        iap.ChiSquare(1),
        iap.ChiSquare(3),
        iap.ChiSquare(iap.Choice([1, 5])),
        iap.RandomSign(iap.ChiSquare(3))
    ]
    gridarr = draw_distributions_grid(params)
    save("parameters", "continuous_chisquare.jpg", gridarr)

    # -----------------------
    # Weibull
    # -----------------------
    from imgaug import parameters as iap
    params = [
        iap.Weibull(0.5),
        iap.Weibull(1),
        iap.Weibull(1.5),
        iap.Weibull((0.5, 1.5))
    ]
    gridarr = draw_distributions_grid(params)
    save("parameters", "continuous_weibull.jpg", gridarr)

    # -----------------------
    # Uniform
    # -----------------------
    from imgaug import parameters as iap
    params = [
        iap.Uniform(0, 1),
        iap.Uniform(iap.Normal(-3, 1), iap.Normal(3, 1)),
        iap.Uniform([-1, 0], 1),
        iap.Uniform((-1, 0), 1)
    ]
    gridarr = draw_distributions_grid(params)
    save("parameters", "continuous_uniform.jpg", gridarr)

    # -----------------------
    # Beta
    # -----------------------
    from imgaug import parameters as iap
    params = [
        iap.Beta(0.5, 0.5),
        iap.Beta(2.0, 2.0),
        iap.Beta(1.0, 0.5),
        iap.Beta(0.5, 1.0)
    ]
    gridarr = draw_distributions_grid(params)
    save("parameters", "continuous_beta.jpg", gridarr)
Example #27
def main(cfg):

    d = datetime.datetime.now()

    if (cfg.data_type == 'medical'):
        ds_dir = os.path.split(cfg.in_dir)[-1]
    else:
        ds_dir = cfg.data_type

    run_dir = pjoin(cfg.out_dir, 'runs',
                    '{}_{:%Y-%m-%d_%H-%M}'.format(ds_dir, d))

    in_shape = [cfg.in_shape] * 2

    transf = iaa.Sequential([
        # iaa.Invert(0.5) if 'Dataset1' in cfg.in_dir else iaa.Noop(),
        iaa.Noop(),
        iaa.SomeOf(3, [
            iaa.Affine(rotate=iap.Uniform(-15., 15.)),
            iaa.Affine(shear=iap.Uniform(-15., 15.)),
            iaa.Fliplr(1.),
            iaa.Flipud(1.),
            iaa.GaussianBlur(sigma=iap.Uniform(0.0, 0.05))
        ]),
        iaa.Resize(in_shape),
        rescale_augmenter
    ])

    normalization = Normalize(mean=[0.485, 0.456, 0.406],
                              std=[0.229, 0.224, 0.225])

    torch.backends.cudnn.benchmark = True
    # torch.backends.cudnn = True

    model = DarNet()

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    if torch.cuda.device_count() > 1:
        print("Using", torch.cuda.device_count(), "GPUs")
        cfg.batch_size *= torch.cuda.device_count()
        model = torch.nn.DataParallel(model)

    model.to(device)

    if cfg.data_type == 'pascal':
        if (cfg.scratch):
            scratch_path = os.environ['TMPDIR']
            files_to_copy = sorted(glob.glob(pjoin(cfg.in_dir, 'voc*.h5')))
            for f in files_to_copy:
                dest_path = pjoin(scratch_path, os.path.split(f)[-1])
                if (not os.path.exists(dest_path)):
                    print('Copying {} to {}'.format(f, scratch_path))
                    shutil.copyfile(f, dest_path)
                else:
                    print('{} already exists!'.format(dest_path))
            cfg.in_dir = scratch_path
        loader = pascalVOCLoaderPatch(cfg.in_dir,
                                      patch_rel_size=cfg.patch_rel_size,
                                      make_edt=True,
                                      augmentations=transf,
                                      normalization=normalization)
    elif cfg.data_type == 'medical':
        loader = PatchLoader(cfg.in_dir,
                             'hand',
                             fake_len=cfg.fake_len,
                             make_snake=True,
                             length_snake=cfg.length_snake,
                             fix_frames=cfg.frames,
                             augmentation=transf)
    else:
        raise Exception('data-type must be pascal or medical')

    # Creating data indices for training and validation splits:
    validation_split = 1 - cfg.ds_split
    random_seed = 42

    if (cfg.frames is None):
        indices = list(range(len(loader)))
    else:
        indices = np.random.choice(list(range(len(cfg.frames))),
                                   size=cfg.n_patches,
                                   replace=True)

    split = int(np.floor(validation_split * len(indices)))

    np.random.seed(random_seed)
    np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]

    if (not os.path.exists(run_dir)):
        os.makedirs(run_dir)

    pd.DataFrame(train_indices).to_csv(pjoin(run_dir, 'train_sample.csv'))
    pd.DataFrame(val_indices).to_csv(pjoin(run_dir, 'val_sample.csv'))

    train_sampler = SubsetRandomSampler(train_indices)

    # keep validation set consistent across epochs
    valid_sampler = SubsetSampler(val_indices)

    # each batch will give different locations / augmentations
    train_loader = torch.utils.data.DataLoader(
        loader,
        batch_size=cfg.batch_size,
        num_workers=cfg.n_workers,
        collate_fn=loader.collate_fn,
        worker_init_fn=loader.worker_init_fn,
        drop_last=True,
        sampler=train_sampler)

    # each batch will give same locations / augmentations
    val_loader = torch.utils.data.DataLoader(
        loader,
        num_workers=cfg.n_workers,
        batch_size=cfg.batch_size,
        collate_fn=loader.collate_fn,
        drop_last=True,
        worker_init_fn=loader.worker_init_fn_dummy,
        sampler=valid_sampler)

    # loader for previewing images
    prev_sampler = SubsetRandomSampler(val_indices)
    prev_loader = torch.utils.data.DataLoader(loader,
                                              num_workers=cfg.n_workers,
                                              collate_fn=loader.collate_fn,
                                              sampler=prev_sampler,
                                              batch_size=4,
                                              drop_last=True)

    dataloaders = {
        'train': train_loader,
        'val': val_loader,
        'prev': prev_loader
    }

    cfg.run_dir = run_dir

    # Save cfg
    with open(pjoin(run_dir, 'cfg.yml'), 'w') as outfile:
        yaml.dump(cfg.__dict__, stream=outfile, default_flow_style=False)

    trainer = Trainer(model, dataloaders, cfg, run_dir)

    # train level set branch
    trainer.pretrain()

    return cfg
Example #28
    augmenters = [
        iaa.Noop(),
        # blur images with a sigma between 0.5 and 2.0
        iaa.GaussianBlur(sigma=(0.5, 2.0)),
        iaa.Add((-50.0, 50.0), per_channel=False),
        iaa.AdditiveGaussianNoise(loc=0,
                                  scale=(0.07 * 255, 0.07 * 255),
                                  per_channel=False),
        iaa.Dropout(p=0.07, per_channel=False),
        iaa.CoarseDropout(p=(0.05, 0.15),
                          size_percent=(0.1, 0.9),
                          per_channel=False),
        iaa.SaltAndPepper(p=(0.05, 0.15), per_channel=False),
        iaa.Salt(p=(0.05, 0.15), per_channel=False),
        iaa.Pepper(p=(0.05, 0.15), per_channel=False),
        iaa.ContrastNormalization(alpha=(iap.Uniform(0.02, 0.03),
                                         iap.Uniform(1.7, 2.1))),
        iaa.ElasticTransformation(alpha=(0.5, 2.0)),
    ]

    seq = iaa.Sequential(iaa.OneOf(augmenters), )

    def get_data_from_tip(tip, batch_size):
        features = []
        labels = []
        descriptions = []
        for i in range(batch_size):
            data = tip.get()
            d, f, l = data
            features.append(f.reshape((224, 224, 1)))
            labels.append(l)
Example #29
def main(cfg):

    d = datetime.datetime.now()

    if (cfg.data_type == 'medical'):
        ds_dir = os.path.split(cfg.data_dir)[-1]
    else:
        ds_dir = cfg.data_type

    run_dir = pjoin(cfg.out_dir, '{}_{:%Y-%m-%d_%H-%M}'.format(ds_dir, d))

    in_shape = [cfg.in_shape] * 2

    transf = iaa.Sequential([
        iaa.Invert(0.5) if 'Dataset1' in cfg.data_dir else iaa.Noop(),
        iaa.SomeOf(3, [
            iaa.Affine(rotate=iap.Uniform(-15., 15.)),
            iaa.Affine(shear=iap.Uniform(-15., 15.)),
            iaa.Fliplr(1.),
            iaa.Flipud(1.),
            iaa.GaussianBlur(sigma=iap.Uniform(0.0, 0.05))
        ]),
        iaa.Resize(in_shape), rescale_augmenter
    ])

    if cfg.data_type == 'pascal':
        loader = pascalVOCLoaderPatch(pjoin(cfg.data_dir, 'VOCdevkit'),
                                      patch_rel_size=cfg.patch_rel_size,
                                      augmentations=transf)
    elif cfg.data_type == 'medical':
        loader = PatchLoader(cfg.data_dir,
                             'hand',
                             fake_len=cfg.fake_len,
                             fix_frames=cfg.frames,
                             augmentation=transf)
    else:
        raise Exception('data-type must be pascal or medical')

    # Creating data indices for training and validation splits:
    validation_split = 1 - cfg.ds_split
    random_seed = 42

    if (cfg.frames is None):
        indices = list(range(len(loader)))
    else:
        indices = np.random.choice(list(range(len(cfg.frames))),
                                   size=cfg.n_patches,
                                   replace=True)

    split = int(np.floor(validation_split * len(indices)))

    np.random.seed(random_seed)
    np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]

    if (not os.path.exists(run_dir)):
        os.makedirs(run_dir)

    pd.DataFrame(train_indices).to_csv(pjoin(run_dir, 'train_sample.csv'))
    pd.DataFrame(val_indices).to_csv(pjoin(run_dir, 'val_sample.csv'))

    train_sampler = SubsetRandomSampler(train_indices)

    # keep validation set consistent across epochs
    valid_sampler = SubsetSampler(val_indices)

    # each batch will give different locations / augmentations
    train_loader = torch.utils.data.DataLoader(
        loader,
        batch_size=cfg.batch_size,
        num_workers=cfg.n_workers,
        collate_fn=loader.collate_fn,
        worker_init_fn=loader.worker_init_fn,
        sampler=train_sampler)

    # each batch will give same locations / augmentations
    val_loader = torch.utils.data.DataLoader(
        loader,
        num_workers=cfg.n_workers,
        batch_size=cfg.batch_size,
        collate_fn=loader.collate_fn,
        worker_init_fn=loader.worker_init_fn_dummy,
        sampler=valid_sampler)

    # loader for previewing images
    prev_sampler = SubsetRandomSampler(val_indices)
    prev_loader = torch.utils.data.DataLoader(loader,
                                              num_workers=cfg.n_workers,
                                              collate_fn=loader.collate_fn,
                                              sampler=prev_sampler,
                                              batch_size=4,
                                              drop_last=True)

    dataloaders = {
        'train': train_loader,
        'val': val_loader,
        'prev': prev_loader
    }

    model = UNet(in_channels=3,
                 out_channels=1,
                 depth=4,
                 cuda=cfg.cuda,
                 with_coordconv=cfg.coordconv,
                 with_coordconv_r=cfg.coordconv_r,
                 with_batchnorm=cfg.batch_norm)

    cfg.run_dir = run_dir

    # Save cfg
    with open(pjoin(run_dir, 'cfg.yml'), 'w') as outfile:
        yaml.dump(cfg.__dict__, stream=outfile, default_flow_style=False)

    trainer = Trainer(model, dataloaders, cfg, run_dir)
    trainer.train()

    return cfg
Example #30
def main(cfg):

    d = datetime.datetime.now()

    if (cfg.data_type == 'medical'):
        ds_dir = os.path.split(cfg.in_dir)[-1]
    else:
        ds_dir = cfg.data_type

    in_shape = [cfg.in_shape] * 2

    transf = iaa.Sequential([
        iaa.Invert(0.5) if 'Dataset1' in cfg.in_dir else iaa.Noop(),
        iaa.SomeOf(3, [
            iaa.Affine(rotate=iap.Uniform(-15., 15.)),
            iaa.Affine(shear=iap.Uniform(-15., 15.)),
            iaa.Fliplr(1.),
            iaa.Flipud(1.),
            iaa.GaussianBlur(sigma=iap.Uniform(0.0, 0.05))
        ]),
        iaa.Resize(in_shape), rescale_augmenter
    ])

    if cfg.data_type == 'pascal':
        loader = pascalVOCLoaderPatch(
            cfg.in_dir,
            patch_rel_size=cfg.patch_rel_size,
            augmentations=transf)
    elif cfg.data_type == 'medical':
        loader = PatchLoader(
            cfg.in_dir,
            'hand',
            fake_len=cfg.fake_len,
            make_opt_box=False,
            fix_frames=cfg.frames,
            augmentation=transf)
    else:
        raise Exception('data-type must be pascal or medical')

    # Creating data indices for training and validation splits:
    validation_split = 1 - cfg.ds_split

    if (cfg.frames is None):
        indices = list(range(len(loader)))
    else:
        indices = np.random.choice(
            list(range(len(cfg.frames))), size=cfg.n_patches, replace=True)

    split = int(np.floor(validation_split * len(indices)))

    np.random.seed(cfg.seed)
    torch.manual_seed(cfg.seed)

    np.random.shuffle(indices)
    train_indices, val_indices = indices[split:], indices[:split]

    # train_sampler = SubsetRandomSampler(train_indices)
    train_sampler = SubsetSampler(train_indices)

    # keep validation set consistent across epochs
    valid_sampler = SubsetSampler(val_indices)

    # each batch will give different locations / augmentations
    train_loader = torch.utils.data.DataLoader(
        loader,
        batch_size=cfg.batch_size,
        num_workers=cfg.n_workers,
        # num_workers=0,
        collate_fn=loader.collate_fn,
        worker_init_fn=loader.worker_init_fn,
        sampler=train_sampler)

    # each batch will give same locations / augmentations
    val_loader = torch.utils.data.DataLoader(
        loader,
        num_workers=cfg.n_workers,
        # num_workers=0,
        batch_size=cfg.batch_size,
        collate_fn=loader.collate_fn,
        worker_init_fn=loader.worker_init_fn_dummy,
        sampler=valid_sampler)

    for e in range(2):
        for i, d in enumerate(train_loader):
            print('[train]: epoch {}, batch {}, loc {}, idx {}'.format(e, i, d['loc'], d['label/idx']))
            if(i > 2):
                break

    for e in range(2):
        for i, d in enumerate(val_loader):
            print('[val]: epoch {}, batch {}, loc {}'.format(e, i, d['loc']))
            if(i > 10):
                break

    return cfg