Esempio n. 1
0
def chapter_augmenters_translatey():
    """Render the documentation example grids for ``iaa.TranslateY``.

    Produces two 4x2 image grids from the quokka example image: one for
    absolute pixel translation, one for relative (percent) translation.
    """
    prefix = "geometric/translatey"
    quokka = ia.quokka(size=(128, 128))

    # Absolute translation: shift by -20..20 pixels along y.
    absolute_aug = iaa.TranslateY(px=(-20, 20))
    run_and_save_augseq(
        prefix + "_absolute.jpg", absolute_aug,
        [quokka for _ in range(4 * 1)], cols=4, rows=2)

    # Relative translation: shift by -10%..10% of the image height.
    relative_aug = iaa.TranslateY(percent=(-0.1, 0.1))
    run_and_save_augseq(
        prefix + "_relative.jpg", relative_aug,
        [quokka for _ in range(4 * 1)], cols=4, rows=2)
Esempio n. 2
0
 def shift(image):
     """Translate *image* randomly by up to 10 percent along each axis.

     Applies a horizontal shift followed by a vertical shift, each drawn
     uniformly from -10%..10% of the corresponding image dimension.
     """
     shift_x = iaa.TranslateX(percent=(-0.1, 0.1))
     shift_y = iaa.TranslateY(percent=(-0.1, 0.1))
     # Chain the two augmenters: x first, then y on the intermediate result.
     return shift_y(image=shift_x(image=image))
def initialise_augmenter():
    """Build the random augmentation pipeline used for training data.

    Returns an ``iaa.SomeOf`` augmenter that applies a random subset
    (possibly none, possibly all) of flips, right-angle rotations,
    translations, scalings and shears, in random order.
    """
    # Flips use probability 1 here because SomeOf itself decides whether
    # each child augmenter is applied at all.
    flip_h = iaa.Fliplr(1)  # 0.5)
    flip_v = iaa.Flipud(1)  # 0.5)

    # At most one of the three right-angle rotations is picked.
    quarter_turns = iaa.OneOf([
        iaa.Affine(rotate=90),
        iaa.Affine(rotate=180),
        iaa.Affine(rotate=270),
    ])

    candidates = [
        flip_h,
        flip_v,
        quarter_turns,
        # Translations of -10% to 10% of the image's pixels.
        iaa.TranslateX(percent=(-0.1, 0.1)),
        iaa.TranslateY(percent=(-0.1, 0.1)),
        # Scale each axis between 0.75 and 1.1 of the original size.
        iaa.ScaleX((0.75, 1.1)),
        iaa.ScaleY((0.75, 1.1)),
        # Shear each axis between -20 and 20 degrees.
        iaa.ShearX((-20, 20)),
        iaa.ShearY((-20, 20)),
    ]

    return iaa.SomeOf((0, None), candidates, random_order=True)
Esempio n. 4
0
def shiftY(shift_amount, input_path, output_path, image_count):
  """Shift image/label pairs vertically and save the augmented results.

  Reads ``image_count`` PNGs named ``<i>.png`` from ``input_path``/images
  and ``input_path``/labels, translates each by ``shift_amount`` pixels
  along the y axis, and writes them to ``output_path``/images and
  ``output_path``/labels as ``shifted_<index>.png``.

  Parameters
  ----------
  shift_amount : int or tuple
      Pixel shift passed to ``iaa.TranslateY``; a tuple samples a random
      shift per image.
  input_path, output_path : str
      Root directories containing/receiving ``images`` and ``labels``.
  image_count : int
      Number of ``<i>.png`` files to process.
  """
  images = []
  labels = []

  for index in range(image_count):
    name = str(index) + '.png'
    images.append(imageio.imread(os.path.join(input_path, 'images', name)))
    labels.append(imageio.imread(os.path.join(input_path, 'labels', name)))

  # to_deterministic() freezes the sampled shift so an image and its label
  # always receive the exact same translation (this matters when
  # shift_amount is a range: the original two stochastic calls could give
  # images and labels different shifts and desync them).
  seq = iaa.Sequential([iaa.TranslateY(px=shift_amount)]).to_deterministic()

  images_aug = seq(images=images)
  labels_aug = seq(images=labels)

  # makedirs with exist_ok avoids the FileExistsError that os.mkdir raised
  # when the function was run a second time with the same output_path.
  os.makedirs(os.path.join(output_path, 'images'), exist_ok=True)
  os.makedirs(os.path.join(output_path, 'labels'), exist_ok=True)

  for indx, img in enumerate(images_aug):
      imageio.imwrite(
          os.path.join(output_path, 'images', 'shifted' + '_' + str(indx) + '.png'),
          img)

  for indx, lbl in enumerate(labels_aug):
      imageio.imwrite(
          os.path.join(output_path, 'labels', 'shifted' + '_' + str(indx) + '.png'),
          lbl)

  print("Shift results were saved given directory.")
 def trim(self, img, percent=0.1, flag='x'):
     """Translate *img* by *percent* along the axis selected by *flag*.

     ``flag == 'x'`` shifts horizontally; any other value shifts
     vertically. Returns the single augmented image.
     """
     # Pick the augmenter class by axis, then apply it to a one-image batch.
     augmenter_cls = iaa.TranslateX if flag == 'x' else iaa.TranslateY
     augmenter = augmenter_cls(percent=percent)
     augmented = augmenter(images=[img])
     return augmented[0]
def dictTranslateX(baseImageListFunc, baseMaskListFunc, fullImageListFunc, segmapListFunc):
    """Augment base images/masks with random horizontal translations.

    Duplicates the base lists ``translateX_x00percent`` times via
    ``expandList``, translates the copies along the x axis by -20%..20%
    with reflect padding, and appends the results to
    ``fullImageListFunc`` / ``segmapListFunc``.

    Returns the extended ``(fullImageListFunc, segmapListFunc)`` pair.
    """
    print('TranslateX, starting number of images:', len(segmapListFunc))
    translateX_x00percent = 2
    # BUG FIX: this previously constructed iaa.TranslateY, i.e. it shifted
    # vertically despite the function name and log message saying TranslateX.
    translateX = iaa.TranslateX(percent=(-0.2, 0.2), mode="reflect")
    # Force the same padding mode for segmentation maps so masks stay
    # aligned with their images.
    translateX._mode_segmentation_maps = "reflect"
    alteredImageListFunc, alteredMaskListFunc = expandList(
        baseImageListFunc, baseMaskListFunc, translateX_x00percent)
    (alteredImageListFunc, alteredMaskListFunc) = translateX(
        images=alteredImageListFunc,
        segmentation_maps=alteredMaskListFunc)

    fullImageListFunc.extend(alteredImageListFunc)
    segmapListFunc.extend(alteredMaskListFunc)
    return fullImageListFunc, segmapListFunc
Esempio n. 7
0
def train(args):
    """Train a BiONet segmentation model and checkpoint the best-IoU weights.

    Builds an imgaug augmentation pipeline, loads the train/validation
    datasets, trains with BCE loss and Adam using a keras-style
    1/(1 + decay*step) LR schedule, evaluates IoU and Dice each epoch,
    and saves the model whenever the mean validation IoU improves.

    args is expected to provide: train_data, valid_data, valid_dataset,
    batch_size, steps, iter, num_class, multiplier, integrate, lr,
    lr_decay, epochs, exp.
    """
    # augmentations
    transforms = iaa.Sequential([
        iaa.Rotate((-15., 15.)),
        iaa.TranslateX(percent=(-0.05, 0.05)),
        iaa.TranslateY(percent=(-0.05, 0.05)),
        iaa.Affine(shear=(-50, 50)),
        iaa.Affine(scale=(0.8, 1.2)),
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5)
    ])

    # load data and create data loaders
    train_set = BiONetDataset(args.train_data,
                              'monuseg',
                              batchsize=args.batch_size,
                              steps=args.steps,
                              transforms=transforms)
    test_set = BiONetDataset(args.valid_data, args.valid_dataset)
    train_loader = DataLoader(dataset=train_set,
                              batch_size=args.batch_size,
                              shuffle=False,
                              drop_last=True,
                              num_workers=0,
                              pin_memory=True)
    test_loader = DataLoader(dataset=test_set,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=0,
                             pin_memory=True)

    # create model
    model = BiONet(iterations=args.iter,
                   num_classes=args.num_class,
                   num_layers=4,
                   multiplier=args.multiplier,
                   integrate=args.integrate).to(device).float()

    criterion = BCELoss()
    optimizer = Adam(params=model.parameters(), lr=args.lr, weight_decay=0.)

    # keras lr decay equivalent: lr(step) = lr0 / (1 + decay * step)
    fcn = lambda step: 1. / (1. + args.lr_decay * step)
    scheduler = LambdaLR(optimizer, lr_lambda=fcn)

    print('model successfully built and compiled.')

    # NOTE(review): assumes "checkpoints/" itself already exists —
    # os.mkdir only creates the leaf directory.
    if not os.path.isdir("checkpoints/" + args.exp):
        os.mkdir("checkpoints/" + args.exp)

    best_iou = 0.
    print('\nStart training...')
    for epoch in range(args.epochs):
        tot_loss = 0.
        tot_iou = 0.
        tot_dice = 0.
        val_loss = 0.
        val_iou = 0.
        val_dice = 0.

        # training
        model.train()
        for step, (x, y) in enumerate(
                tqdm(train_loader,
                     desc='[TRAIN] Epoch ' + str(epoch + 1) + '/' +
                     str(args.epochs))):
            # Cap each epoch at args.steps batches.
            if step >= args.steps:
                break
            x = x.to(device).float()
            y = y.to(device).float()

            optimizer.zero_grad()
            output = model(x)

            # loss
            l = criterion(output, y)
            tot_loss += l.item()
            l.backward()
            optimizer.step()

            # metrics (computed on CPU numpy copies of predictions/targets)
            x, y = output.detach().cpu().numpy(), y.detach().cpu().numpy()
            iou_score = iou(y, x)
            dice_score = dice_coef(y, x)
            tot_iou += iou_score
            tot_dice += dice_score

            # LR decays once per batch, matching the keras per-step decay.
            scheduler.step()

        # NOTE(review): averages divide by args.steps — assumes the loader
        # yields at least args.steps batches per epoch.
        print('[TRAIN] Epoch: ' + str(epoch + 1) + '/' + str(args.epochs),
              'loss:', tot_loss / args.steps, 'iou:', tot_iou / args.steps,
              'dice:', tot_dice / args.steps)

        # validation
        model.eval()
        with torch.no_grad():
            for step, (x, y) in enumerate(
                    tqdm(test_loader,
                         desc='[VAL] Epoch ' + str(epoch + 1) + '/' +
                         str(args.epochs))):
                x = x.to(device).float()
                y = y.to(device).float()

                output = model(x)

                # loss
                l = criterion(output, y)
                val_loss += l.item()

                # metrics
                x, y = output.detach().cpu().numpy(), y.cpu().numpy()
                iou_score = iou(y, x)
                dice_score = dice_coef(y, x)
                val_iou += iou_score
                val_dice += dice_score

        # Checkpoint whenever the mean validation IoU improves.
        if val_iou / len(test_loader) > best_iou:
            best_iou = val_iou / len(test_loader)
            save_model(args, model)

        print('[VAL] Epoch: ' + str(epoch + 1) + '/' + str(args.epochs),
              'val_loss:', val_loss / len(test_loader), 'val_iou:',
              val_iou / len(test_loader), 'val_dice:',
              val_dice / len(test_loader), 'best val_iou:', best_iou)

    print('\nTraining fininshed!')
Esempio n. 8
0
        transformed_image = transform(image=image)['image']

    elif augmentation == 'scalex':
        transform = iaa.ScaleX((0.5, 1.5))
        transformed_image = transform(image=image)

    elif augmentation == 'scaley':
        transform = iaa.ScaleY((0.5, 1.5))
        transformed_image = transform(image=image)

    elif augmentation == 'translatex':
        transform = iaa.TranslateX(px=(200))
        transformed_image = transform(image=image)

    elif augmentation == 'translatey':
        transform = iaa.TranslateY(px=(-200))
        transformed_image = transform(image=image)

    ## Crop

    elif augmentation == 'crop':
        transform = Crop(always_apply=True, x_max=400, y_max=400)
        transformed_image = transform(image=image)['image']

    elif augmentation == 'crop_to_fixed_size':
        transform = iaa.CropToFixedSize(width=300, height=300)
        transformed_image = transform(image=image)

    elif augmentation == 'crop_to_multiples_of':
        transform = iaa.CropToPowersOf(height_base=3, width_base=2)
        transformed_image = transform(image=image)
Esempio n. 9
0
# Output directories for the augmented image/mask pairs.
result_originals = path.joinpath("AugmentedOriginals")
result_masks = path.joinpath("AugmentedMasks")

# Generate IMAGE_COUNT augmented pairs; source filenames cycle every 900
# images (i % 900), so the same source can be reused with new random
# augmentations.
for i in range(IMAGE_COUNT):
    istr = str(i % 900) + ".jpg"
    original = imageio.imread(originals.joinpath(istr))
    mask = imageio.imread(masks.joinpath(istr))
    mask = SegmentationMapsOnImage(mask, shape=mask.shape)

    # A fresh pipeline per image: applies a random subset of the children
    # added below, in random order.
    seq = iaa.SomeOf((0, None), random_order=True)

    seq.add(iaa.Add((-40, 40), per_channel=0.5))
    seq.add(iaa.GaussianBlur(sigma=(0, 2)))
    seq.add(iaa.SigmoidContrast(gain=(5, 20), cutoff=(0.3, 0.75), per_channel=True))
    seq.add(iaa.HorizontalFlip())
    seq.add(iaa.VerticalFlip())
    # cval=33 fills revealed borders with a constant; presumably 33 marks
    # "ignore" pixels in the masks — TODO confirm against the label scheme.
    seq.add(iaa.TranslateX(percent=(-0.7, 0.7), cval=33))
    seq.add(iaa.TranslateY(percent=(-0.7, 0.7), cval=33))
    # randrange samples ONE angle here, so this child rotates by a fixed
    # angle for this image (not a per-application range).
    seq.add(iaa.Rotate(random.randrange(-60, 60), cval=33))
    seq.add(iaa.ScaleX((0.5, 1.5), cval=33))
    seq.add(iaa.ScaleY((0.5, 1.5), cval=33))
    seq.add(iaa.imgcorruptlike.DefocusBlur(severity=1))
    # NOTE(review): this CropAndPad augmenter is created but never added to
    # `seq` nor applied — dead code; confirm whether it should be seq.add()ed.
    aug = iaa.CropAndPad(percent=([-0.3, 0.3], [-0.3, 0.3], [-0.3, 0.3], [-0.3, 0.3]))

    # Image and mask are passed in one call so both get identical transforms.
    results_o, results_m = seq(image=original, segmentation_maps=mask)

    istr = str(i) + ".jpg"
    imageio.imsave(result_originals.joinpath(istr), results_o)
    imageio.imsave(result_masks.joinpath(istr), results_m.arr)
def da_policy(image, label):
    """Apply one of four randomly chosen augmentation policies to *image*.

    Each policy is a pair of augmentations, each gated by its own
    probability: (translate-x, histogram-equalize), (translate-y, cutout),
    (shear-y, translate-x), (rotate, multiply-saturation). The label is
    passed through unchanged.

    Returns a tuple ``(image as tf.float32, label as tf.int64)``.
    """

    # Assumed input resolution used to scale pixel offsets — TODO confirm
    # that incoming images really are 224x224.
    img_size = 224

    #image = sample[0]
    policy = np.random.randint(4)

    #policy = 2
    if policy == 0:

        p = np.random.random()
        if p <= 0.6:
            # Horizontal shift of up to ±60 px, gray (128) border fill.
            aug = iaa.TranslateX(px=(-60, 60), cval=128)
            image = aug(image=image)

        p = np.random.random()
        if p <= 0.8:
            aug = iaa.HistogramEqualization()
            image = aug(image=image)

    elif policy == 1:

        p = np.random.random()
        if p <= 0.2:
            # Vertical shift of up to ±18% of the image size.
            aug = iaa.TranslateY(px=(int(-0.18 * img_size),
                                     int(0.18 * img_size)),
                                 cval=128)
            image = aug(image=image)

        p = np.random.random()
        if p <= 0.8:
            # Cutout with a random square side of up to 48 px, expressed as
            # a fraction of the image size.
            square_size = np.random.randint(48)
            aug = iaa.Cutout(nb_iterations=1,
                             size=square_size / img_size,
                             squared=True)
            image = aug(image=image)

    elif policy == 2:
        p = np.random.random()
        if p <= 1:
            # Always applied (p <= 1): vertical shear of up to ±6% of size.
            aug = iaa.ShearY(shear=(int(-0.06 * img_size),
                                    int(0.06 * img_size)),
                             order=1,
                             cval=128)
            image = aug(image=image)

        p = np.random.random()
        if p <= 0.6:
            aug = iaa.TranslateX(px=(-60, 60), cval=128)
            image = aug(image=image)

    elif policy == 3:
        p = np.random.random()
        if p <= 0.6:
            aug = iaa.Rotate(rotate=(-30, 30), order=1, cval=128)
            image = aug(image=image)

        p = np.random.random()
        if p <= 1:
            # Always applied (p <= 1).
            aug = iaa.MultiplySaturation((0.54, 1.54))
            image = aug(image=image)

    # For EfficientNet it is NOT necessary to normalize the input.
    return (tf.cast(image, tf.float32), tf.cast(label, tf.int64))