Пример #1
0
def chapter_augmenters_translatex():
    """Render the documentation example images for TranslateX.

    Produces two 4x2 grids: one for absolute (pixel) translation and one
    for relative (percent) translation.
    """
    fn_start = "geometric/translatex"
    image = ia.quokka(size=(128, 128))

    # Absolute translation: shift by -20..20 pixels.
    run_and_save_augseq(
        fn_start + "_absolute.jpg",
        iaa.TranslateX(px=(-20, 20)),
        [image] * (4 * 2), cols=4, rows=2)

    # Relative translation: shift by -10%..10% of the image width.
    run_and_save_augseq(
        fn_start + "_relative.jpg",
        iaa.TranslateX(percent=(-0.1, 0.1)),
        [image] * (4 * 2), cols=4, rows=2)
Пример #2
0
 def shift(image):
     """Return *image* shifted randomly by up to 10% along x and then y."""
     shift_x = iaa.TranslateX(percent=(-0.1, 0.1))
     shift_y = iaa.TranslateY(percent=(-0.1, 0.1))
     # Apply the horizontal shift first, then the vertical one.
     return shift_y(image=shift_x(image=image))
Пример #3
0
def _lane_argue(*, image, lane_src):
    """Jointly augment an image and its lane-line annotations.

    NOTE(review): assumes ``lane_src`` looks like
    ``{'Lines': [[{'x': ..., 'y': ...}, ...], ...]}`` — confirm with caller.
    Returns ``(augmented_image, dict(Lines=augmented_lanes))``.
    """
    # Convert each lane (list of point dicts) into an imgaug LineString.
    coords_per_lane = [[(float(pt['x']), float(pt['y'])) for pt in lane]
                       for lane in lane_src['Lines']]
    line_strings = [ia_LineString(coords) for coords in coords_per_lane]
    lanes_on_image = LineStringsOnImage(line_strings, shape=image.shape)

    # Per-channel HSV jitter augmenters (H, S, V multiplied by a random factor).
    hsv_jitter = [
        iaa.WithColorspace(to_colorspace=iaa.CSPACE_HSV, from_colorspace=iaa.CSPACE_RGB,
                           children=iaa.WithChannels(channel, iaa.Multiply(factor)))
        for channel, factor in ((0, (0.7, 1.3)), (1, (0.1, 2)), (2, (0.5, 1.5)))
    ]
    # Photometric augmentation: exactly one option is sampled per application.
    color_shift = iaa.OneOf([
        iaa.GaussianBlur(sigma=(0.5, 1.5)),
        iaa.LinearContrast((1.5, 1.5), per_channel=False),
        iaa.Multiply((0.8, 1.2), per_channel=0.2),
        iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.1 * 255), per_channel=0.5),
    ] + hsv_jitter)

    # Geometric augmentation: 4 of the 5 ops below are sampled per application.
    position_shift = iaa.SomeOf(4, [
        iaa.Fliplr(),
        iaa.Crop(percent=([0, 0.2], [0, 0.15], [0, 0], [0, 0.15]), keep_size=True),
        iaa.TranslateX(px=(-16, 16)),
        iaa.ShearX(shear=(-15, 15)),
        iaa.Rotate(rotate=(-15, 15)),
    ])
    # Each stage fires with p=0.6; the two stages run in random order.
    pipeline = iaa.Sequential([
        iaa.Sometimes(p=0.6, then_list=color_shift),
        iaa.Sometimes(p=0.6, then_list=position_shift),
    ], random_order=True)

    # Process image and line strings as one batch so both receive the
    # identical sampled transformation.
    batch = ia.Batch(images=[image], line_strings=[lanes_on_image])
    batch_aug = list(pipeline.augment_batches([batch]))[0]  # augment_batches returns a generator
    image_aug = batch_aug.images_aug[0]
    lanes_aug = batch_aug.line_strings_aug[0]
    lane_aug = [[dict(x=kpt.x, y=kpt.y) for kpt in line.to_keypoints()]
                for line in lanes_aug]
    return image_aug, dict(Lines=lane_aug)
Пример #4
0
def initialise_augmenter():
    """Build the random training augmentation pipeline.

    Returns a ``SomeOf((0, None))`` augmenter that applies a random subset
    of the candidate transforms, in random order.
    """
    # Flips use p=1 because SomeOf itself decides whether each op is applied.
    horizontal_flip = iaa.Fliplr(1)  #0.5)
    vertical_flip = iaa.Flipud(1)  #0.5)

    # Exactly one right-angle rotation (90/180/270) when this slot is chosen.
    right_angle_rotation = iaa.OneOf(
        [iaa.Affine(rotate=angle) for angle in (90, 180, 270)])

    candidates = [
        horizontal_flip,
        vertical_flip,
        right_angle_rotation,
        iaa.TranslateX(percent=(-0.1, 0.1)),  # shift +-10% horizontally
        iaa.TranslateY(percent=(-0.1, 0.1)),  # shift +-10% vertically
        iaa.ScaleX((0.75, 1.1)),              # scale width 0.75x-1.1x
        iaa.ScaleY((0.75, 1.1)),              # scale height 0.75x-1.1x
        iaa.ShearX((-20, 20)),                # shear +-20 degrees
        iaa.ShearY((-20, 20)),
    ]

    return iaa.SomeOf((0, None), candidates, random_order=True)
Пример #5
0
def shiftX(shift_amount, input_path, output_path, image_count):
  """Horizontally shift paired image/label PNGs and save the results.

  Reads ``image_count`` files named ``0.png .. (image_count-1).png`` from
  ``input_path/images`` and ``input_path/labels``, shifts both by
  ``shift_amount`` pixels along x, and writes ``shifted_<i>.png`` files
  into ``output_path/images`` and ``output_path/labels``.
  """
  images = []
  labels = []

  for idx in range(image_count):
    images.append(imageio.imread(input_path + '/images/' + str(idx) + '.png'))
    labels.append(imageio.imread(input_path + '/labels/' + str(idx) + '.png'))

  # BUG FIX: the original ran the (potentially stochastic) sequence twice,
  # so a range-valued shift_amount would shift images and labels by
  # DIFFERENT amounts. to_deterministic() freezes the sampled values so
  # both passes apply the identical transformation.
  seq = iaa.Sequential([iaa.TranslateX(px=shift_amount)]).to_deterministic()

  images_aug = seq(images=images)
  labels_aug = seq(images=labels)

  # BUG FIX: os.mkdir raised FileExistsError on reruns; makedirs with
  # exist_ok=True also creates missing parents.
  os.makedirs(os.path.join(output_path, 'images'), exist_ok=True)
  os.makedirs(os.path.join(output_path, 'labels'), exist_ok=True)

  for indx, img in enumerate(images_aug):
      imageio.imwrite(output_path + '/images/'  + 'shifted'+ '_' + str(indx) + '.png', img)

  for indx, lbl in enumerate(labels_aug):
      imageio.imwrite(output_path + '/labels/'  + 'shifted'+ '_' + str(indx) + '.png', lbl)

  print("Shift results were saved given directory.")
 def trim(self, img, percent=0.1, flag='x'):
     """Translate *img* by *percent* along x (``flag='x'``) or y (otherwise)."""
     if flag == 'x':
         augmenter = iaa.TranslateX(percent=percent)
     else:
         augmenter = iaa.TranslateY(percent=percent)
     # imgaug expects a batch; augment a one-element list and unwrap it.
     return augmenter(images=[img])[0]
def augmentation():
    """Expand the horizontal training set with flipped/rotated/shifted copies.

    Returns ``(images, labels)`` where each augmented batch is appended
    after the originals, keeping images and labels aligned in length.
    """
    train_images_horiz, train_images_verti, train_labels, test_images_horiz, \
        test_images_verti, test_labels, val_images_horiz, val_images_verti, \
        val_labels = list_input_network()

    # One single-op sequence per augmentation type.
    sequences = [
        iaa.Sequential([iaa.Fliplr(1)]),
        iaa.Sequential([iaa.Flipud(1)]),
        iaa.Sequential([iaa.Rotate((-45, 45))]),
        iaa.Sequential([iaa.TranslateX(px=(-20, 20))]),
    ]

    new_train_images = train_images_horiz
    new_train_labels = train_labels

    # BUG FIX: the original alternated assignments between new_train_images
    # and new_train_images_horiz, which silently dropped the Flipud batch and
    # returned images/labels of DIFFERENT lengths (4x vs 5x the originals).
    # Accumulate consistently so every batch of images gets matching labels.
    for seq in sequences:
        images_aug = seq(images=train_images_horiz)
        new_train_images = np.concatenate((new_train_images, images_aug))
        new_train_labels = np.concatenate((new_train_labels, train_labels))

    return new_train_images, new_train_labels
def dictTranslateY(baseImageListFunc, baseMaskListFunc, fullImageListFunc, segmapListFunc):
    """Append vertically translated copies of the base images and masks.

    Expands the base lists via ``expandList`` (``translateY_x00percent``
    copies per base item), translates images and segmentation maps together,
    and extends ``fullImageListFunc`` / ``segmapListFunc`` in place.
    Returns the two extended lists.
    """
    print('TranslateY, starting number of images:', len(segmapListFunc))
    translateY_x00percent = 2  # number of translated copies per base image
    # BUG FIX: despite the function name, the log message and the variable
    # name, the original constructed iaa.TranslateX (horizontal shift).
    translateY = iaa.TranslateY(percent=(-0.2, 0.2), mode="reflect")
    # Force reflect padding for segmentation maps too, so masks stay
    # consistent with the reflected image borders.
    translateY._mode_segmentation_maps = "reflect"
    alteredImageListFunc, alteredMaskListFunc = expandList(baseImageListFunc, baseMaskListFunc, translateY_x00percent)
    (alteredImageListFunc, alteredMaskListFunc) = translateY(images=alteredImageListFunc,
                                                             segmentation_maps=alteredMaskListFunc)

    fullImageListFunc.extend(alteredImageListFunc)
    segmapListFunc.extend(alteredMaskListFunc)
    return fullImageListFunc, segmapListFunc
Пример #9
0
def train(args):
    """Train BiONet on the training split and validate after every epoch.

    NOTE(review): relies on module-level names defined elsewhere in this
    file (BiONetDataset, BiONet, iou, dice_coef, save_model, device, ...).
    A checkpoint is saved whenever the mean validation IoU improves.
    """
    # augmentations
    transforms = iaa.Sequential([
        iaa.Rotate((-15., 15.)),
        iaa.TranslateX(percent=(-0.05, 0.05)),
        iaa.TranslateY(percent=(-0.05, 0.05)),
        iaa.Affine(shear=(-50, 50)),
        iaa.Affine(scale=(0.8, 1.2)),
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5)
    ])

    # load data and create data loaders
    train_set = BiONetDataset(args.train_data,
                              'monuseg',
                              batchsize=args.batch_size,
                              steps=args.steps,
                              transforms=transforms)
    test_set = BiONetDataset(args.valid_data, args.valid_dataset)
    train_loader = DataLoader(dataset=train_set,
                              batch_size=args.batch_size,
                              shuffle=False,
                              drop_last=True,
                              num_workers=0,
                              pin_memory=True)
    test_loader = DataLoader(dataset=test_set,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=0,
                             pin_memory=True)

    # create model
    model = BiONet(iterations=args.iter,
                   num_classes=args.num_class,
                   num_layers=4,
                   multiplier=args.multiplier,
                   integrate=args.integrate).to(device).float()

    criterion = BCELoss()
    optimizer = Adam(params=model.parameters(), lr=args.lr, weight_decay=0.)

    # keras lr decay equivalent
    # lr multiplier 1 / (1 + decay * step); stepped once per BATCH below.
    fcn = lambda step: 1. / (1. + args.lr_decay * step)
    scheduler = LambdaLR(optimizer, lr_lambda=fcn)

    print('model successfully built and compiled.')

    # NOTE(review): assumes the "checkpoints" directory already exists;
    # only the experiment subfolder is created here.
    if not os.path.isdir("checkpoints/" + args.exp):
        os.mkdir("checkpoints/" + args.exp)

    best_iou = 0.
    print('\nStart training...')
    for epoch in range(args.epochs):
        # Per-epoch accumulators for loss and metrics.
        tot_loss = 0.
        tot_iou = 0.
        tot_dice = 0.
        val_loss = 0.
        val_iou = 0.
        val_dice = 0.

        # training
        model.train()
        for step, (x, y) in enumerate(
                tqdm(train_loader,
                     desc='[TRAIN] Epoch ' + str(epoch + 1) + '/' +
                     str(args.epochs))):
            # Cap each epoch at args.steps batches.
            if step >= args.steps:
                break
            x = x.to(device).float()
            y = y.to(device).float()

            optimizer.zero_grad()
            output = model(x)

            # loss
            l = criterion(output, y)
            tot_loss += l.item()
            l.backward()
            optimizer.step()

            # metrics
            # Detach to CPU numpy; note x is rebound to the model OUTPUT here.
            x, y = output.detach().cpu().numpy(), y.detach().cpu().numpy()
            iou_score = iou(y, x)
            dice_score = dice_coef(y, x)
            tot_iou += iou_score
            tot_dice += dice_score

            # Decay the learning rate once per batch (keras-style).
            scheduler.step()

        print('[TRAIN] Epoch: ' + str(epoch + 1) + '/' + str(args.epochs),
              'loss:', tot_loss / args.steps, 'iou:', tot_iou / args.steps,
              'dice:', tot_dice / args.steps)

        # validation
        model.eval()
        with torch.no_grad():
            for step, (x, y) in enumerate(
                    tqdm(test_loader,
                         desc='[VAL] Epoch ' + str(epoch + 1) + '/' +
                         str(args.epochs))):
                x = x.to(device).float()
                y = y.to(device).float()

                output = model(x)

                # loss
                l = criterion(output, y)
                val_loss += l.item()

                # metrics
                x, y = output.detach().cpu().numpy(), y.cpu().numpy()
                iou_score = iou(y, x)
                dice_score = dice_coef(y, x)
                val_iou += iou_score
                val_dice += dice_score

        # Checkpoint on improvement in mean validation IoU.
        if val_iou / len(test_loader) > best_iou:
            best_iou = val_iou / len(test_loader)
            save_model(args, model)

        print('[VAL] Epoch: ' + str(epoch + 1) + '/' + str(args.epochs),
              'val_loss:', val_loss / len(test_loader), 'val_iou:',
              val_iou / len(test_loader), 'val_dice:',
              val_dice / len(test_loader), 'best val_iou:', best_iou)

    print('\nTraining fininshed!')
Пример #10
0
    elif augmentation == 'shift_scale_rotate':
        transform = ShiftScaleRotate(always_apply=True, shift_limit=0.1, 
                                                        scale_limit=0.5)
        transformed_image = transform(image=image)['image']

    elif augmentation == 'scalex':
        transform = iaa.ScaleX((0.5, 1.5))
        transformed_image = transform(image=image)

    elif augmentation == 'scaley':
        transform = iaa.ScaleY((0.5, 1.5))
        transformed_image = transform(image=image)

    elif augmentation == 'translatex':
        transform = iaa.TranslateX(px=(200))
        transformed_image = transform(image=image)

    elif augmentation == 'translatey':
        transform = iaa.TranslateY(px=(-200))
        transformed_image = transform(image=image)

    ## Crop

    elif augmentation == 'crop':
        transform = Crop(always_apply=True, x_max=400, y_max=400)
        transformed_image = transform(image=image)['image']

    elif augmentation == 'crop_to_fixed_size':
        transform = iaa.CropToFixedSize(width=300, height=300)
        transformed_image = transform(image=image)
Пример #11
0
# Output directories for the augmented copies (assumed to already exist).
result_originals = path.joinpath("AugmentedOriginals")
result_masks = path.joinpath("AugmentedMasks")

# Produce IMAGE_COUNT augmented image/mask pairs; source files are reused
# cyclically via i % 900 — presumably the source pool has 900 files, confirm.
for i in range(IMAGE_COUNT):
    istr = str(i % 900) + ".jpg"
    original = imageio.imread(originals.joinpath(istr))
    mask = imageio.imread(masks.joinpath(istr))
    # Wrap the raw mask so imgaug transforms it in lockstep with the image.
    mask = SegmentationMapsOnImage(mask, shape=mask.shape)

    # Random subset (0..all) of the augmenters added below, random order.
    seq = iaa.SomeOf((0, None), random_order=True)

    seq.add(iaa.Add((-40, 40), per_channel=0.5))
    seq.add(iaa.GaussianBlur(sigma=(0, 2)))
    seq.add(iaa.SigmoidContrast(gain=(5, 20), cutoff=(0.3, 0.75), per_channel=True))
    seq.add(iaa.HorizontalFlip())
    seq.add(iaa.VerticalFlip())
    # cval=33 fills newly exposed pixels — presumably an "ignore" value for
    # the masks; verify against the downstream label handling.
    seq.add(iaa.TranslateX(percent=(-0.7, 0.7), cval=33))
    seq.add(iaa.TranslateY(percent=(-0.7, 0.7), cval=33))
    # NOTE(review): the rotation ANGLE is sampled here with Python's random,
    # per iteration, rather than letting imgaug sample it per application.
    seq.add(iaa.Rotate(random.randrange(-60, 60), cval=33))
    seq.add(iaa.ScaleX((0.5, 1.5), cval=33))
    seq.add(iaa.ScaleY((0.5, 1.5), cval=33))
    seq.add(iaa.imgcorruptlike.DefocusBlur(severity=1))
    # NOTE(review): 'aug' is constructed but never applied — dead code?
    aug = iaa.CropAndPad(percent=([-0.3, 0.3], [-0.3, 0.3], [-0.3, 0.3], [-0.3, 0.3]))

    # Augment image and mask together so they stay aligned.
    results_o, results_m = seq(image=original, segmentation_maps=mask)

    istr = str(i) + ".jpg"
    imageio.imsave(result_originals.joinpath(istr), results_o)
    imageio.imsave(result_masks.joinpath(istr), results_m.arr)
def da_policy(image, label):
    """Apply one of four randomly chosen augmentation policies to *image*.

    Each policy is a pair of augmentations, each gated by its own
    probability (AutoAugment-style). Returns the image cast to float32 and
    the label cast to int64.
    """

    img_size = 224  # assumes 224x224 inputs — TODO confirm with the pipeline

    #image = sample[0]
    # Pick one of the 4 policies uniformly at random.
    policy = np.random.randint(4)

    #policy = 2
    if policy == 0:

        # Horizontal translation (p=0.6) then histogram equalization (p=0.8).
        p = np.random.random()
        if p <= 0.6:
            aug = iaa.TranslateX(px=(-60, 60), cval=128)
            image = aug(image=image)

        p = np.random.random()
        if p <= 0.8:
            aug = iaa.HistogramEqualization()
            image = aug(image=image)

    elif policy == 1:

        # Vertical translation up to 18% of the image (p=0.2), then Cutout.
        p = np.random.random()
        if p <= 0.2:
            aug = iaa.TranslateY(px=(int(-0.18 * img_size),
                                     int(0.18 * img_size)),
                                 cval=128)
            image = aug(image=image)

        p = np.random.random()
        if p <= 0.8:
            # Cut out one square whose side is < 48 px (as a size fraction).
            square_size = np.random.randint(48)
            aug = iaa.Cutout(nb_iterations=1,
                             size=square_size / img_size,
                             squared=True)
            image = aug(image=image)

    elif policy == 2:
        # Vertical shear (always applied), then horizontal translation (p=0.6).
        p = np.random.random()
        if p <= 1:
            aug = iaa.ShearY(shear=(int(-0.06 * img_size),
                                    int(0.06 * img_size)),
                             order=1,
                             cval=128)
            image = aug(image=image)

        p = np.random.random()
        if p <= 0.6:
            aug = iaa.TranslateX(px=(-60, 60), cval=128)
            image = aug(image=image)

    elif policy == 3:
        # Rotation (p=0.6), then saturation jitter (always applied).
        p = np.random.random()
        if p <= 0.6:
            aug = iaa.Rotate(rotate=(-30, 30), order=1, cval=128)
            image = aug(image=image)

        p = np.random.random()
        if p <= 1:
            aug = iaa.MultiplySaturation((0.54, 1.54))
            image = aug(image=image)

    # For EfficientNet it is NOT necessary to normalize the input.
    return (tf.cast(image, tf.float32), tf.cast(label, tf.int64))