Example #1
 def get_augmenters():
     return [
         iaa.Scale((224, 224)),
         iaa.Sometimes(0.25, iaa.GaussianBlur(sigma=(0, 3.0))),
         iaa.Fliplr(0.5),
         iaa.Affine(rotate=(-20, 20), mode='symmetric'),
         iaa.Sometimes(
             0.25,
             iaa.OneOf([
                 iaa.Dropout(p=(0, 0.1)),
                 iaa.CoarseDropout(0.1, size_percent=0.5)
             ])),
         iaa.AddToHueAndSaturation(value=(-10, 10), per_channel=True),
         iaa.Sometimes(0.3,
                       iaa.OneOf([iaa.Add(-10),
                                  iaa.Add(45),
                                  iaa.Add(80)])),
         iaa.Sometimes(0.3,
                       iaa.OneOf([iaa.Dropout(0.03),
                                  iaa.Dropout(0.05)])),
         iaa.Sometimes(
             0.3,
             iaa.OneOf([
                 iaa.ContrastNormalization(0.5),
                 iaa.ContrastNormalization(1.2),
                 iaa.PerspectiveTransform(0.075),
                 iaa.PerspectiveTransform(0.100),
                 iaa.PerspectiveTransform(0.125)
             ])),
         iaa.Sometimes(
             0.3,
             iaa.OneOf([
                 iaa.Grayscale(alpha=1.0),
                 iaa.Grayscale(alpha=0.5),
                 iaa.Grayscale(alpha=0.2)
             ])),
         iaa.Sometimes(
             0.3,
             iaa.OneOf([
                 iaa.CoarsePepper(size_percent=0.30),
                 iaa.CoarsePepper(size_percent=0.02),
                 iaa.CoarsePepper(size_percent=0.1)
             ])),
         iaa.Sometimes(
             0.3,
             iaa.OneOf(
                 [iaa.SaltAndPepper(p=0.05),
                  iaa.SaltAndPepper(p=0.03)])),
     ]
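
A minimal sketch of how a list like the one above might be consumed, assuming the caller wraps it in iaa.Sequential and feeds uint8 image batches; the random batch here is purely illustrative.

import numpy as np
import imgaug.augmenters as iaa

# Hypothetical batch of uint8 images, for illustration only.
batch = np.random.randint(0, 255, size=(8, 256, 256, 3), dtype=np.uint8)

pipeline = iaa.Sequential(get_augmenters())   # apply the augmenters above in order
augmented = pipeline.augment_images(batch)    # iaa.Scale resizes everything to 224x224
print(augmented.shape)                        # (8, 224, 224, 3)
# Note: iaa.Scale was renamed to iaa.Resize in later imgaug releases.
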
Example #2
    def train(self, sess, data, labels, learning_rate):
        ### do hiding?
        if len(self.do_hide) > 0:  # do_hide holds the candidate grid sizes N
            N = np.random.choice(self.do_hide, 1)[0]

            ### N == 0 means: use the full image (no hiding)
            if N != 0:
                n, w, h, _ = data.shape
                mask = net.gen_random_patch(shape=(n, w, h), N=N)
                mask = np.expand_dims(mask, axis=3)

                data = data * mask + (1 - mask) * self.image_mean

        ### do augmentation?
        if self.do_augmentation == 1:
            data = iaa.Sequential([
                iaa.Fliplr(0.25),
                iaa.Flipud(0.25),
                iaa.Sometimes(0.25, iaa.Affine(rotate=(-180, 180))),
                iaa.Sometimes(
                    0.2,
                    iaa.Affine(translate_percent={
                        'x': (-0.15, 0.15),
                        'y': (-0.15, 0.15)
                    }))
            ]).augment_images(data)
        elif self.do_augmentation == 2:
            data = iaa.Sequential([
                iaa.Fliplr(0.25),
                iaa.Flipud(0.25),
                iaa.Sometimes(0.25, iaa.Affine(rotate=(-180, 180))),
                iaa.Sometimes(
                    0.2,
                    iaa.Affine(translate_percent={
                        'x': (-0.1, 0.1),
                        'y': (-0.1, 0.1)
                    })),
                iaa.Sometimes(
                    0.2,
                    iaa.OneOf([
                        iaa.CoarseDropout(0.2, size_percent=(0.05, 0.1)),
                        iaa.CoarseSalt(0.2, size_percent=(0.05, 0.1)),
                        iaa.CoarsePepper(0.2, size_percent=(0.05, 0.1)),
                        iaa.CoarseSaltAndPepper(0.2, size_percent=(0.05, 0.1))
                    ]))
            ]).augment_images(data)

        _, loss, scores, hits, summary = sess.run(
            [
                self.train_op, self.loss_op, self.score_op, self.hit_op,
                self.summary_op
            ],
            feed_dict={
                self.inputs: data,
                self.labels: labels,
                self.learning_rate: learning_rate,
                self.is_training: True
            })

        return loss, scores, hits, summary
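
net.gen_random_patch is not included in this excerpt; a minimal sketch of what such a grid-based hiding mask (Hide-and-Seek style) could look like, with the helper body and the 0.5 keep probability being assumptions:

import numpy as np

def gen_random_patch(shape, N, keep_prob=0.5):
    # Hypothetical sketch: per image, hide random cells of an N x N grid (1 = keep, 0 = hide).
    n, w, h = shape
    keep = (np.random.rand(n, N, N) < keep_prob).astype(np.float32)
    # Upscale each N x N grid to the full image size by repeating cells, then crop to (w, h).
    mask = np.repeat(np.repeat(keep, int(np.ceil(w / N)), axis=1),
                     int(np.ceil(h / N)), axis=2)
    return mask[:, :w, :h]
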
Example #3
 def logic(self, image):
     for param in self.augmentation_params:
         self.augmentation_data.append([
             str(param.augmentation_value),
             iaa.CoarsePepper(
                 p=0.2, size_percent=param.augmentation_value,
                 min_size=2).to_deterministic().augment_image(image),
             param.detection_tag
         ])
Example #4
def chapter_augmenters_coarsepepper():
    fn_start = "arithmetic/coarsepepper"

    aug = iaa.CoarsePepper(0.05, size_percent=(0.01, 0.1))
    run_and_save_augseq(fn_start + ".jpg",
                        aug, [ia.quokka(size=(128, 128)) for _ in range(8)],
                        cols=4,
                        rows=2,
                        quality=95)
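
run_and_save_augseq is a helper from imgaug's documentation scripts and is not shown here; a rough stand-in that uses only the public API (the output filename is chosen arbitrarily) could be:

import numpy as np
import imageio
import imgaug as ia
import imgaug.augmenters as iaa

aug = iaa.CoarsePepper(0.05, size_percent=(0.01, 0.1))
images = [ia.quokka(size=(128, 128)) for _ in range(8)]
images_aug = aug.augment_images(images)              # one augmented version per input image
grid = np.vstack([np.hstack(images_aug[:4]),          # arrange the 8 results as a 2x4 grid
                  np.hstack(images_aug[4:])])
imageio.imwrite("coarsepepper_preview.jpg", grid)
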
Example #5
def main():
    args = parser.parse_args()

    # Data augmentation
    global seq_geo
    global seq_img
    seq_geo = iaa.SomeOf(
        (0, 5),
        [
            iaa.Fliplr(0.5),  # horizontally flip 50% of the images
            iaa.PerspectiveTransform(scale=(0, 0.075)),
            iaa.Affine(
                scale={
                    "x": (0.8, 1.0),
                    "y": (0.8, 1.0)
                },
                rotate=(-5, 5),
                translate_percent={
                    "x": (-0.1, 0.1),
                    "y": (-0.1, 0.1)
                },
            ),  # rotate by -5 to +5 degrees
            iaa.Crop(percent=(0, 0.125)),  # crop images from each side by 0 to 12.5% (randomly chosen)
            iaa.CoarsePepper(p=0.01, size_percent=0.1)
        ],
        random_order=False)
    # Content transformation
    seq_img = iaa.SomeOf(
        (0, 3),
        [
            iaa.GaussianBlur(
                sigma=(0, 1.0)),  # blur images with a sigma of 0 to 1.0
            iaa.ContrastNormalization(alpha=(0.9, 1.1)),
            iaa.Grayscale(alpha=(0, 0.2)),
            iaa.Multiply((0.9, 1.1))
        ])

    # We store all arguments in a json file. This has two advantages:
    # 1. We can always get back and see what exactly that experiment was
    # 2. We can resume an experiment as-is without needing to remember all flags.
    args_file = os.path.join(args.experiment_root, 'args.json')
    if args.resume:
        if not os.path.isfile(args_file):
            raise IOError('`args.json` not found in {}'.format(args_file))

        print('Loading args from {}.'.format(args_file))
        with open(args_file, 'r') as f:
            args_resumed = json.load(f)
        args_resumed['resume'] = True  # This would be overwritten.

        # When resuming, we not only want to populate the args object with the
        # values from the file, but we also want to check for some possible
        # conflicts between loaded and given arguments.
        for key, value in args.__dict__.items():
            if key in args_resumed:
                resumed_value = args_resumed[key]
                if resumed_value != value:
                    print('Warning: For the argument `{}` we are using the'
                          ' loaded value `{}`. The provided value was `{}`'
                          '.'.format(key, resumed_value, value))
                    args.__dict__[key] = resumed_value
            else:
                print('Warning: A new argument was added since the last run:'
                      ' `{}`. Using the new value: `{}`.'.format(key, value))

    else:
        # If the experiment directory exists already, we bail in fear.
        if os.path.exists(args.experiment_root):
            if os.listdir(args.experiment_root):
                print('The directory {} already exists and is not empty.'
                      ' If you want to resume training, append --resume to'
                      ' your call.'.format(args.experiment_root))
                exit(1)
        else:
            os.makedirs(args.experiment_root)

        # Store the passed arguments for later resuming and grepping in a nice
        # and readable format.
        with open(args_file, 'w') as f:
            json.dump(vars(args),
                      f,
                      ensure_ascii=False,
                      indent=2,
                      sort_keys=True)

    log_file = os.path.join(args.experiment_root, "train")
    logging.config.dictConfig(common.get_logging_dict(log_file))
    log = logging.getLogger('train')

    # Also show all parameter values at the start, for ease of reading logs.
    log.info('Training using the following parameters:')
    for key, value in sorted(vars(args).items()):
        log.info('{}: {}'.format(key, value))

    # Check them here, so they are not required when --resume-ing.
    if not args.train_set:
        parser.print_help()
        log.error("You did not specify the `train_set` argument!")
        sys.exit(1)
    if not args.image_root:
        parser.print_help()
        log.error("You did not specify the required `image_root` argument!")
        sys.exit(1)

    # Load the data from the CSV file.
    pids, fids = common.load_dataset(args.train_set, args.image_root)
    max_fid_len = max(map(len, fids))  # We'll need this later for logfiles.

    # Load feature embeddings
    if args.hard_pool_size > 0:
        with h5py.File(args.train_embeddings, 'r') as f_train:
            train_embs = np.array(f_train['emb'])
            f_dists = scipy.spatial.distance.cdist(train_embs, train_embs)
            hard_ids = get_hard_id_pool(pids, f_dists, args.hard_pool_size)

    # Setup a tf.Dataset where one "epoch" loops over all PIDS.
    # PIDS are shuffled after every epoch and continue indefinitely.
    unique_pids = np.unique(pids)
    dataset = tf.data.Dataset.from_tensor_slices(unique_pids)
    dataset = dataset.shuffle(len(unique_pids))

    # Constrain the dataset size to a multiple of the batch-size, so that
    # we don't get overlap at the end of each epoch.
    if args.hard_pool_size == 0:
        dataset = dataset.take(
            (len(unique_pids) // args.batch_p) * args.batch_p)
        dataset = dataset.repeat(
            None)  # Repeat forever. Funny way of stating it.

    else:
        dataset = dataset.repeat(
            None)  # Repeat forever. Funny way of stating it.
        dataset = dataset.map(lambda pid: sample_batch_ids_for_pid(
            pid, all_pids=pids, batch_p=args.batch_p, all_hard_pids=hard_ids))
        # Unbatch the P PIDs
        dataset = dataset.apply(tf.contrib.data.unbatch())

    # For every PID, get K images.
    dataset = dataset.map(lambda pid: sample_k_fids_for_pid(
        pid, all_fids=fids, all_pids=pids, batch_k=args.batch_k))

    # Ungroup/flatten the batches for easy loading of the files.
    dataset = dataset.apply(tf.contrib.data.unbatch())

    # Convert filenames to actual image tensors.
    net_input_size = (args.net_input_height, args.net_input_width)
    pre_crop_size = (args.pre_crop_height, args.pre_crop_width)
    dataset = dataset.map(lambda im, fid, pid: common.fid_to_image(
        fid,
        pid,
        image_root=args.image_root,
        image_size=pre_crop_size if args.crop_augment else net_input_size),
                          num_parallel_calls=args.loading_threads)

    # Augment the data if specified by the arguments.
    if args.augment == False:
        dataset = dataset.map(
            lambda fid, pid: common.fid_to_image(fid,
                                                 pid,
                                                 image_root=args.image_root,
                                                 image_size=pre_crop_size
                                                 if args.crop_augment else
                                                 net_input_size),  #Ergys
            num_parallel_calls=args.loading_threads)

        if args.flip_augment:
            dataset = dataset.map(lambda im, fid, pid: (
                tf.image.random_flip_left_right(im), fid, pid))
        if args.crop_augment:
            dataset = dataset.map(lambda im, fid, pid: (tf.random_crop(
                im, net_input_size + (3, )), fid, pid))
    else:
        dataset = dataset.map(lambda im, fid, pid: common.fid_to_image(
            fid, pid, image_root=args.image_root, image_size=net_input_size),
                              num_parallel_calls=args.loading_threads)

        dataset = dataset.map(lambda im, fid, pid: (tf.py_func(
            augment_images, [im], [tf.float32]), fid, pid))
        dataset = dataset.map(lambda im, fid, pid: (tf.reshape(
            im[0],
            (args.net_input_height, args.net_input_width, 3)), fid, pid))

    # Group it back into PK batches.
    batch_size = args.batch_p * args.batch_k
    dataset = dataset.batch(batch_size)

    # Overlap producing and consuming for parallelism.
    dataset = dataset.prefetch(batch_size * 2)

    # Since we repeat the data infinitely, we only need a one-shot iterator.
    images, fids, pids = dataset.make_one_shot_iterator().get_next()

    # Create the model and an embedding head.
    model = import_module('nets.' + args.model_name)
    head = import_module('heads.' + args.head_name)

    # Feed the image through the model. The returned `body_prefix` will be used
    # further down to load the pre-trained weights for all variables with this
    # prefix.
    endpoints, body_prefix = model.endpoints(images, is_training=True)
    with tf.name_scope('head'):
        endpoints = head.head(endpoints, args.embedding_dim, is_training=True)

    # Create the loss in two steps:
    # 1. Compute all pairwise distances according to the specified metric.
    # 2. For each anchor along the first dimension, compute its loss.
    dists = loss.cdist(endpoints['emb'], endpoints['emb'], metric=args.metric)
    losses, train_top1, prec_at_k, _, neg_dists, pos_dists = loss.LOSS_CHOICES[
        args.loss](dists,
                   pids,
                   args.margin,
                   batch_precision_at_k=args.batch_k - 1)

    # Count the number of active entries, and compute the total batch loss.
    num_active = tf.reduce_sum(tf.cast(tf.greater(losses, 1e-5), tf.float32))
    loss_mean = tf.reduce_mean(losses)

    # Some logging for tensorboard.
    tf.summary.histogram('loss_distribution', losses)
    tf.summary.scalar('loss', loss_mean)
    tf.summary.scalar('batch_top1', train_top1)
    tf.summary.scalar('batch_prec_at_{}'.format(args.batch_k - 1), prec_at_k)
    tf.summary.scalar('active_count', num_active)
    tf.summary.histogram('embedding_dists', dists)
    tf.summary.histogram('embedding_pos_dists', pos_dists)
    tf.summary.histogram('embedding_neg_dists', neg_dists)
    tf.summary.histogram('embedding_lengths',
                         tf.norm(endpoints['emb_raw'], axis=1))

    # Create the mem-mapped arrays in which we'll log all training detail in
    # addition to tensorboard, because tensorboard is annoying for detailed
    # inspection and actually discards data in histogram summaries.
    if args.detailed_logs:
        log_embs = lb.create_or_resize_dat(
            os.path.join(args.experiment_root, 'embeddings'),
            dtype=np.float32,
            shape=(args.train_iterations, batch_size, args.embedding_dim))
        log_loss = lb.create_or_resize_dat(
            os.path.join(args.experiment_root, 'losses'),
            dtype=np.float32,
            shape=(args.train_iterations, batch_size))
        log_fids = lb.create_or_resize_dat(
            os.path.join(args.experiment_root, 'fids'),
            dtype='S' + str(max_fid_len),
            shape=(args.train_iterations, batch_size))

    # These are collected here before we add the optimizer, because depending
    # on the optimizer, it might add extra slots, which are also global
    # variables, with the exact same prefix.
    model_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                        body_prefix)

    # Define the optimizer and the learning-rate schedule.
    # Unfortunately, we get NaNs if we don't handle no-decay separately.
    global_step = tf.Variable(0, name='global_step', trainable=False)
    if 0 <= args.decay_start_iteration < args.train_iterations:
        learning_rate = tf.train.exponential_decay(
            args.learning_rate,
            tf.maximum(0, global_step - args.decay_start_iteration),
            args.train_iterations - args.decay_start_iteration, 0.001)
    else:
        learning_rate = args.learning_rate
    tf.summary.scalar('learning_rate', learning_rate)
    optimizer = tf.train.AdamOptimizer(learning_rate)
    # Feel free to try others!
    # optimizer = tf.train.AdadeltaOptimizer(learning_rate)

    # Update_ops are used to update batchnorm stats.
    with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
        train_op = optimizer.minimize(loss_mean, global_step=global_step)

    # Define a saver for the complete model.
    checkpoint_saver = tf.train.Saver(max_to_keep=0)

    with tf.Session() as sess:
        if args.resume:
            # In case we're resuming, simply load the full checkpoint to init.
            last_checkpoint = tf.train.latest_checkpoint(args.experiment_root)
            log.info('Restoring from checkpoint: {}'.format(last_checkpoint))
            checkpoint_saver.restore(sess, last_checkpoint)
        else:
            # But if we're starting from scratch, we may need to load some
            # variables from the pre-trained weights, and random init others.
            sess.run(tf.global_variables_initializer())
            if args.initial_checkpoint is not None:
                saver = tf.train.Saver(model_variables)
                saver.restore(sess, args.initial_checkpoint)

            # In any case, we also store this initialization as a checkpoint,
            # such that we could run exactly reproducible experiments.
            checkpoint_saver.save(sess,
                                  os.path.join(args.experiment_root,
                                               'checkpoint'),
                                  global_step=0)

        merged_summary = tf.summary.merge_all()
        summary_writer = tf.summary.FileWriter(args.experiment_root,
                                               sess.graph)

        start_step = sess.run(global_step)
        log.info('Starting training from iteration {}.'.format(start_step))

        # Finally, here comes the main-loop. This `Uninterrupt` is a handy
        # utility such that an iteration still finishes on Ctrl+C and we can
        # stop the training cleanly.
        with lb.Uninterrupt(sigs=[SIGINT, SIGTERM], verbose=True) as u:
            for i in range(start_step, args.train_iterations):

                # Compute gradients, update weights, store logs!
                start_time = time.time()
                _, summary, step, b_prec_at_k, b_embs, b_loss, b_fids = \
                    sess.run([train_op, merged_summary, global_step,
                              prec_at_k, endpoints['emb'], losses, fids])
                elapsed_time = time.time() - start_time

                # Compute the iteration speed and add it to the summary.
                # We did observe some weird spikes that we couldn't track down.
                summary2 = tf.Summary()
                summary2.value.add(tag='secs_per_iter',
                                   simple_value=elapsed_time)
                summary_writer.add_summary(summary2, step)
                summary_writer.add_summary(summary, step)

                if args.detailed_logs:
                    log_embs[i], log_loss[i], log_fids[
                        i] = b_embs, b_loss, b_fids

                # Do a huge print out of the current progress.
                seconds_todo = (args.train_iterations - step) * elapsed_time
                log.info(
                    'iter:{:6d}, loss min|avg|max: {:.3f}|{:.3f}|{:6.3f}, '
                    'batch-p@{}: {:.2%}, ETA: {} ({:.2f}s/it)'.format(
                        step, float(np.min(b_loss)), float(np.mean(b_loss)),
                        float(np.max(b_loss)), args.batch_k - 1,
                        float(b_prec_at_k),
                        timedelta(seconds=int(seconds_todo)), elapsed_time))
                sys.stdout.flush()
                sys.stderr.flush()

                # Save a checkpoint of training every so often.
                if (args.checkpoint_frequency > 0
                        and step % args.checkpoint_frequency == 0):
                    checkpoint_saver.save(sess,
                                          os.path.join(args.experiment_root,
                                                       'checkpoint'),
                                          global_step=step)

                # Stop the main-loop at the end of the step, if requested.
                if u.interrupted:
                    log.info("Interrupted on request!")
                    break

        # Store one final checkpoint. This might be redundant, but it is crucial
        # in case intermediate storing was disabled and it saves a checkpoint
        # when the process was interrupted.
        checkpoint_saver.save(sess,
                              os.path.join(args.experiment_root, 'checkpoint'),
                              global_step=step)
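
The augment_images function wrapped in tf.py_func above is not part of this snippet. A plausible sketch, assuming it applies the global seq_geo and seq_img pipelines to the single image it receives (the dataset.map() call runs before batching) and that the incoming tensor holds 0-255 pixel values:

import numpy as np

def augment_images(image):
    # Hypothetical sketch of the tf.py_func target: despite the plural name it sees
    # one image here, because the map() call above runs before .batch().
    image = np.asarray(image).astype(np.uint8)   # imgaug generally expects uint8 input
    image = seq_geo.augment_image(image)         # geometric transforms (flip, affine, crop, ...)
    image = seq_img.augment_image(image)         # content transforms (blur, contrast, ...)
    return image.astype(np.float32)
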
Example #6
def draw_per_augmenter_images():
    print("[draw_per_augmenter_images] Loading image...")
    #image = misc.imresize(ndimage.imread("quokka.jpg")[0:643, 0:643], (128, 128))
    image = ia.quokka_square(size=(128, 128))

    keypoints = [ia.Keypoint(x=34, y=15), ia.Keypoint(x=85, y=13), ia.Keypoint(x=63, y=73)] # left ear, right ear, mouth
    keypoints = [ia.KeypointsOnImage(keypoints, shape=image.shape)]

    print("[draw_per_augmenter_images] Initializing...")
    rows_augmenters = [
        (0, "Noop", [("", iaa.Noop()) for _ in sm.xrange(5)]),
        (0, "Crop\n(top, right,\nbottom, left)", [(str(vals), iaa.Crop(px=vals)) for vals in [(2, 0, 0, 0), (0, 8, 8, 0), (4, 0, 16, 4), (8, 0, 0, 32), (32, 64, 0, 0)]]),
        (0, "Pad\n(top, right,\nbottom, left)", [(str(vals), iaa.Pad(px=vals)) for vals in [(2, 0, 0, 0), (0, 8, 8, 0), (4, 0, 16, 4), (8, 0, 0, 32), (32, 64, 0, 0)]]),
        (0, "Fliplr", [(str(p), iaa.Fliplr(p)) for p in [0, 0, 1, 1, 1]]),
        (0, "Flipud", [(str(p), iaa.Flipud(p)) for p in [0, 0, 1, 1, 1]]),
        (0, "Superpixels\np_replace=1", [("n_segments=%d" % (n_segments,), iaa.Superpixels(p_replace=1.0, n_segments=n_segments)) for n_segments in [25, 50, 75, 100, 125]]),
        (0, "Superpixels\nn_segments=100", [("p_replace=%.2f" % (p_replace,), iaa.Superpixels(p_replace=p_replace, n_segments=100)) for p_replace in [0, 0.25, 0.5, 0.75, 1.0]]),
        (0, "Invert", [("p=%d" % (p,), iaa.Invert(p=p)) for p in [0, 0, 1, 1, 1]]),
        (0, "Invert\n(per_channel)", [("p=%.2f" % (p,), iaa.Invert(p=p, per_channel=True)) for p in [0.5, 0.5, 0.5, 0.5, 0.5]]),
        (0, "Add", [("value=%d" % (val,), iaa.Add(val)) for val in [-45, -25, 0, 25, 45]]),
        (0, "Add\n(per channel)", [("value=(%d, %d)" % (vals[0], vals[1],), iaa.Add(vals, per_channel=True)) for vals in [(-55, -35), (-35, -15), (-10, 10), (15, 35), (35, 55)]]),
        (0, "AddToHueAndSaturation", [("value=%d" % (val,), iaa.AddToHueAndSaturation(val)) for val in [-45, -25, 0, 25, 45]]),
        (0, "Multiply", [("value=%.2f" % (val,), iaa.Multiply(val)) for val in [0.25, 0.5, 1.0, 1.25, 1.5]]),
        (1, "Multiply\n(per channel)", [("value=(%.2f, %.2f)" % (vals[0], vals[1],), iaa.Multiply(vals, per_channel=True)) for vals in [(0.15, 0.35), (0.4, 0.6), (0.9, 1.1), (1.15, 1.35), (1.4, 1.6)]]),
        (0, "GaussianBlur", [("sigma=%.2f" % (sigma,), iaa.GaussianBlur(sigma=sigma)) for sigma in [0.25, 0.50, 1.0, 2.0, 4.0]]),
        (0, "AverageBlur", [("k=%d" % (k,), iaa.AverageBlur(k=k)) for k in [1, 3, 5, 7, 9]]),
        (0, "MedianBlur", [("k=%d" % (k,), iaa.MedianBlur(k=k)) for k in [1, 3, 5, 7, 9]]),
        (0, "BilateralBlur\nsigma_color=250,\nsigma_space=250", [("d=%d" % (d,), iaa.BilateralBlur(d=d, sigma_color=250, sigma_space=250)) for d in [1, 3, 5, 7, 9]]),
        (0, "Sharpen\n(alpha=1)", [("lightness=%.2f" % (lightness,), iaa.Sharpen(alpha=1, lightness=lightness)) for lightness in [0, 0.5, 1.0, 1.5, 2.0]]),
        (0, "Emboss\n(alpha=1)", [("strength=%.2f" % (strength,), iaa.Emboss(alpha=1, strength=strength)) for strength in [0, 0.5, 1.0, 1.5, 2.0]]),
        (0, "EdgeDetect", [("alpha=%.2f" % (alpha,), iaa.EdgeDetect(alpha=alpha)) for alpha in [0.0, 0.25, 0.5, 0.75, 1.0]]),
        (0, "DirectedEdgeDetect\n(alpha=1)", [("direction=%.2f" % (direction,), iaa.DirectedEdgeDetect(alpha=1, direction=direction)) for direction in [0.0, 1*(360/5)/360, 2*(360/5)/360, 3*(360/5)/360, 4*(360/5)/360]]),
        (0, "AdditiveGaussianNoise", [("scale=%.2f*255" % (scale,), iaa.AdditiveGaussianNoise(scale=scale * 255)) for scale in [0.025, 0.05, 0.1, 0.2, 0.3]]),
        (0, "AdditiveGaussianNoise\n(per channel)", [("scale=%.2f*255" % (scale,), iaa.AdditiveGaussianNoise(scale=scale * 255, per_channel=True)) for scale in [0.025, 0.05, 0.1, 0.2, 0.3]]),
        (0, "Dropout", [("p=%.2f" % (p,), iaa.Dropout(p=p)) for p in [0.025, 0.05, 0.1, 0.2, 0.4]]),
        (0, "Dropout\n(per channel)", [("p=%.2f" % (p,), iaa.Dropout(p=p, per_channel=True)) for p in [0.025, 0.05, 0.1, 0.2, 0.4]]),
        (3, "CoarseDropout\n(p=0.2)", [("size_percent=%.2f" % (size_percent,), iaa.CoarseDropout(p=0.2, size_percent=size_percent, min_size=2)) for size_percent in [0.3, 0.2, 0.1, 0.05, 0.02]]),
        (0, "CoarseDropout\n(p=0.2, per channel)", [("size_percent=%.2f" % (size_percent,), iaa.CoarseDropout(p=0.2, size_percent=size_percent, per_channel=True, min_size=2)) for size_percent in [0.3, 0.2, 0.1, 0.05, 0.02]]),
        (0, "SaltAndPepper", [("p=%.2f" % (p,), iaa.SaltAndPepper(p=p)) for p in [0.025, 0.05, 0.1, 0.2, 0.4]]),
        (0, "Salt", [("p=%.2f" % (p,), iaa.Salt(p=p)) for p in [0.025, 0.05, 0.1, 0.2, 0.4]]),
        (0, "Pepper", [("p=%.2f" % (p,), iaa.Pepper(p=p)) for p in [0.025, 0.05, 0.1, 0.2, 0.4]]),
        (0, "CoarseSaltAndPepper\n(p=0.2)", [("size_percent=%.2f" % (size_percent,), iaa.CoarseSaltAndPepper(p=0.2, size_percent=size_percent, min_size=2)) for size_percent in [0.3, 0.2, 0.1, 0.05, 0.02]]),
        (0, "CoarseSalt\n(p=0.2)", [("size_percent=%.2f" % (size_percent,), iaa.CoarseSalt(p=0.2, size_percent=size_percent, min_size=2)) for size_percent in [0.3, 0.2, 0.1, 0.05, 0.02]]),
        (0, "CoarsePepper\n(p=0.2)", [("size_percent=%.2f" % (size_percent,), iaa.CoarsePepper(p=0.2, size_percent=size_percent, min_size=2)) for size_percent in [0.3, 0.2, 0.1, 0.05, 0.02]]),
        (0, "ContrastNormalization", [("alpha=%.1f" % (alpha,), iaa.ContrastNormalization(alpha=alpha)) for alpha in [0.5, 0.75, 1.0, 1.25, 1.50]]),
        (0, "ContrastNormalization\n(per channel)", [("alpha=(%.2f, %.2f)" % (alphas[0], alphas[1],), iaa.ContrastNormalization(alpha=alphas, per_channel=True)) for alphas in [(0.4, 0.6), (0.65, 0.85), (0.9, 1.1), (1.15, 1.35), (1.4, 1.6)]]),
        (0, "Grayscale", [("alpha=%.1f" % (alpha,), iaa.Grayscale(alpha=alpha)) for alpha in [0.0, 0.25, 0.5, 0.75, 1.0]]),
        (6, "PerspectiveTransform", [("scale=%.3f" % (scale,), iaa.PerspectiveTransform(scale=scale)) for scale in [0.025, 0.05, 0.075, 0.10, 0.125]]),
        (0, "PiecewiseAffine", [("scale=%.3f" % (scale,), iaa.PiecewiseAffine(scale=scale)) for scale in [0.015, 0.03, 0.045, 0.06, 0.075]]),
        (0, "Affine: Scale", [("%.1fx" % (scale,), iaa.Affine(scale=scale)) for scale in [0.1, 0.5, 1.0, 1.5, 1.9]]),
        (0, "Affine: Translate", [("x=%d y=%d" % (x, y), iaa.Affine(translate_px={"x": x, "y": y})) for x, y in [(-32, -16), (-16, -32), (-16, -8), (16, 8), (16, 32)]]),
        (0, "Affine: Rotate", [("%d deg" % (rotate,), iaa.Affine(rotate=rotate)) for rotate in [-90, -45, 0, 45, 90]]),
        (0, "Affine: Shear", [("%d deg" % (shear,), iaa.Affine(shear=shear)) for shear in [-45, -25, 0, 25, 45]]),
        (0, "Affine: Modes", [(mode, iaa.Affine(translate_px=-32, mode=mode)) for mode in ["constant", "edge", "symmetric", "reflect", "wrap"]]),
        (0, "Affine: cval", [("%d" % (int(cval*255),), iaa.Affine(translate_px=-32, cval=int(cval*255), mode="constant")) for cval in [0.0, 0.25, 0.5, 0.75, 1.0]]),
        (
            2, "Affine: all", [
                (
                    "",
                    iaa.Affine(
                        scale={"x": (0.5, 1.5), "y": (0.5, 1.5)},
                        translate_px={"x": (-32, 32), "y": (-32, 32)},
                        rotate=(-45, 45),
                        shear=(-32, 32),
                        mode=ia.ALL,
                        cval=(0.0, 1.0)
                    )
                )
                for _ in sm.xrange(5)
            ]
        ),
        (1, "ElasticTransformation\n(sigma=0.2)", [("alpha=%.1f" % (alpha,), iaa.ElasticTransformation(alpha=alpha, sigma=0.2)) for alpha in [0.1, 0.5, 1.0, 3.0, 9.0]]),
        (0, "Alpha\nwith EdgeDetect(1.0)", [("factor=%.1f" % (factor,), iaa.Alpha(factor=factor, first=iaa.EdgeDetect(1.0))) for factor in [0.0, 0.25, 0.5, 0.75, 1.0]]),
        (4, "Alpha\nwith EdgeDetect(1.0)\n(per channel)", [("factor=(%.2f, %.2f)" % (factor[0], factor[1]), iaa.Alpha(factor=factor, first=iaa.EdgeDetect(1.0), per_channel=0.5)) for factor in [(0.0, 0.2), (0.15, 0.35), (0.4, 0.6), (0.65, 0.85), (0.8, 1.0)]]),
        (15, "SimplexNoiseAlpha\nwith EdgeDetect(1.0)", [("", iaa.SimplexNoiseAlpha(first=iaa.EdgeDetect(1.0))) for alpha in [0.0, 0.25, 0.5, 0.75, 1.0]]),
        (9, "FrequencyNoiseAlpha\nwith EdgeDetect(1.0)", [("exponent=%.1f" % (exponent,), iaa.FrequencyNoiseAlpha(exponent=exponent, first=iaa.EdgeDetect(1.0), size_px_max=16, upscale_method="linear", sigmoid=False)) for exponent in [-4, -2, 0, 2, 4]])
    ]

    print("[draw_per_augmenter_images] Augmenting...")
    rows = []
    for (row_seed, row_name, augmenters) in rows_augmenters:
        ia.seed(row_seed)
        #for img_title, augmenter in augmenters:
        #    #aug.reseed(1000)
        #    pass

        row_images = []
        row_keypoints = []
        row_titles = []
        for img_title, augmenter in augmenters:
            aug_det = augmenter.to_deterministic()
            row_images.append(aug_det.augment_image(image))
            row_keypoints.append(aug_det.augment_keypoints(keypoints)[0])
            row_titles.append(img_title)
        rows.append((row_name, row_images, row_keypoints, row_titles))

    # matplotlib drawing routine
    """
    print("[draw_per_augmenter_images] Plotting...")
    width = 8
    height = int(1.5 * len(rows_augmenters))
    fig = plt.figure(figsize=(width, height))
    grid_rows = len(rows)
    grid_cols = 1 + 5
    gs = gridspec.GridSpec(grid_rows, grid_cols, width_ratios=[2, 1, 1, 1, 1, 1])
    axes = []
    for i in sm.xrange(grid_rows):
        axes.append([plt.subplot(gs[i, col_idx]) for col_idx in sm.xrange(grid_cols)])
    fig.tight_layout()
    #fig.subplots_adjust(bottom=0.2 / grid_rows, hspace=0.22)
    #fig.subplots_adjust(wspace=0.005, hspace=0.425, bottom=0.02)
    fig.subplots_adjust(wspace=0.005, hspace=0.005, bottom=0.02)

    for row_idx, (row_name, row_images, row_keypoints, row_titles) in enumerate(rows):
        axes_row = axes[row_idx]

        for col_idx in sm.xrange(grid_cols):
            ax = axes_row[col_idx]

            ax.cla()
            ax.axis("off")
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)

            if col_idx == 0:
                ax.text(0, 0.5, row_name, color="black")
            else:
                cell_image = row_images[col_idx-1]
                cell_keypoints = row_keypoints[col_idx-1]
                cell_image_kp = cell_keypoints.draw_on_image(cell_image, size=5)
                ax.imshow(cell_image_kp)
                x = 0
                y = 145
                #ax.text(x, y, row_titles[col_idx-1], color="black", backgroundcolor="white", fontsize=6)
                ax.text(x, y, row_titles[col_idx-1], color="black", fontsize=7)


    fig.savefig("examples.jpg", bbox_inches="tight")
    #plt.show()
    """

    # simpler and faster drawing routine
    """
    output_image = ExamplesImage(128, 128, 128+64, 32)
    for (row_name, row_images, row_keypoints, row_titles) in rows:
        row_images_kps = []
        for image, keypoints in zip(row_images, row_keypoints):
            row_images_kps.append(keypoints.draw_on_image(image, size=5))
        output_image.add_row(row_name, row_images_kps, row_titles)
    misc.imsave("examples.jpg", output_image.draw())
    """

    # routine to draw many single files
    seen = defaultdict(lambda: 0)
    markups = []
    for (row_name, row_images, row_keypoints, row_titles) in rows:
        output_image = ExamplesImage(128, 128, 128+64, 32)
        row_images_kps = []
        for image, keypoints in zip(row_images, row_keypoints):
            row_images_kps.append(keypoints.draw_on_image(image, size=5))
        output_image.add_row(row_name, row_images_kps, row_titles)
        if "\n" in row_name:
            row_name_clean = row_name[0:row_name.find("\n")+1]
        else:
            row_name_clean = row_name
        row_name_clean = re.sub(r"[^a-z0-9]+", "_", row_name_clean.lower())
        row_name_clean = row_name_clean.strip("_")
        if seen[row_name_clean] > 0:
            row_name_clean = "%s_%d" % (row_name_clean, seen[row_name_clean] + 1)
        fp = os.path.join(IMAGES_DIR, "examples_%s.jpg" % (row_name_clean,))
        #misc.imsave(fp, output_image.draw())
        save(fp, output_image.draw())
        seen[row_name_clean] += 1

        markup_descr = row_name.replace('"', '') \
                               .replace("\n", " ") \
                               .replace("(", "") \
                               .replace(")", "")
        markup = '![%s](%s?raw=true "%s")' % (markup_descr, fp, markup_descr)
        markups.append(markup)

    for markup in markups:
        print(markup)
Example #7
    # Replaces percent of all pixels with salt noise
    "Salt": lambda percent: iaa.Salt(percent),

    # Adds coarse salt noise to image, i.e. rectangles that contain noisy white-ish pixels
    # Replaces percent of all pixels with salt in an image that has lo to hi percent of the input image size,
    # then upscales the results to the input image size, leading to large rectangular areas being replaced.
    "Coarse_Salt": lambda percent, lo, hi: iaa.CoarseSalt(percent, size_percent=(lo, hi)),

    # Adds Pepper noise to an image, i.e. black-ish pixels
    # Replaces percent of all pixels with Pepper noise
    "Pepper": lambda percent: iaa.Pepper(percent),

    # Adds coarse pepper noise to image, i.e. rectangles that contain noisy black-ish pixels
    # Replaces percent of all pixels with pepper in an image that has lo to hi percent of the input image size,
    # then upscales the results to the input image size, leading to large rectangular areas being replaced.
    "Coarse_Pepper": lambda percent, lo, hi: iaa.CoarsePepper(percent, size_percent=(lo, hi)),

    # In alpha blending, two images are naively mixed. E.g. let A be the foreground image, B the background
    # image and a the alpha value. Each pixel intensity is then computed as a * A_ij + (1-a) * B_ij.
    # Images passed in must be a numpy array of shape (height, width, channel)
    "Blend_Alpha": lambda image_fg, image_bg, alpha: iaa.blend_alpha(image_fg, image_bg, alpha),

    # Blur/Denoise an image using a bilateral filter.
    # Bilateral filters blur homogeneous and textured areas, while trying to preserve edges.
    # Blurs all images using a bilateral filter with max distance d_lo to d_hi, with the ranges for sigma_color
    # and sigma_space defined by sc_lo/sc_hi and ss_lo/ss_hi.
    "Bilateral_Blur": lambda d_lo, d_hi, sc_lo, sc_hi, ss_lo, ss_hi:
    iaa.BilateralBlur(d=(d_lo, d_hi), sigma_color=(sc_lo, sc_hi), sigma_space=(ss_lo, ss_hi)),

    # Augmenter that sharpens images and overlays the result with the original image.
    # Create a motion blur augmenter with kernel size of (kernel x kernel) and a blur angle of either x or y degrees
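
The snippet above is an excerpt from a larger dictionary of augmenter factories; a minimal, self-contained sketch of the same dispatch pattern (the dictionary name and the random image are illustrative) and how one entry might be invoked:

import numpy as np
import imgaug.augmenters as iaa

# Illustrative subset of the factory dictionary shown above.
AUGMENTERS = {
    "Pepper": lambda percent: iaa.Pepper(percent),
    "Coarse_Pepper": lambda percent, lo, hi: iaa.CoarsePepper(percent, size_percent=(lo, hi)),
}

image = np.random.randint(0, 255, size=(128, 128, 3), dtype=np.uint8)
aug = AUGMENTERS["Coarse_Pepper"](0.05, 0.01, 0.1)   # build the augmenter from its factory
image_aug = aug.augment_image(image)
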
Example #8
def main():
    parser = argparse.ArgumentParser(description="Check augmenters visually.")
    parser.add_argument(
        "--only",
        default=None,
        help=
        "If this is set, then only the results of an augmenter with this name will be shown. "
        "Optionally, comma-separated list.",
        required=False)
    args = parser.parse_args()

    images = [
        ia.quokka_square(size=(128, 128)),
        ia.imresize_single_image(data.astronaut(), (128, 128))
    ]

    keypoints = [
        ia.KeypointsOnImage([
            ia.Keypoint(x=50, y=40),
            ia.Keypoint(x=70, y=38),
            ia.Keypoint(x=62, y=52)
        ],
                            shape=images[0].shape),
        ia.KeypointsOnImage([
            ia.Keypoint(x=55, y=32),
            ia.Keypoint(x=42, y=95),
            ia.Keypoint(x=75, y=89)
        ],
                            shape=images[1].shape)
    ]

    bounding_boxes = [
        ia.BoundingBoxesOnImage([
            ia.BoundingBox(x1=10, y1=10, x2=20, y2=20),
            ia.BoundingBox(x1=40, y1=50, x2=70, y2=60)
        ],
                                shape=images[0].shape),
        ia.BoundingBoxesOnImage([
            ia.BoundingBox(x1=10, y1=10, x2=20, y2=20),
            ia.BoundingBox(x1=40, y1=50, x2=70, y2=60)
        ],
                                shape=images[1].shape)
    ]

    augmenters = [
        iaa.Sequential([
            iaa.CoarseDropout(p=0.5, size_percent=0.05),
            iaa.AdditiveGaussianNoise(scale=0.1 * 255),
            iaa.Crop(percent=0.1)
        ],
                       name="Sequential"),
        iaa.SomeOf(2,
                   children=[
                       iaa.CoarseDropout(p=0.5, size_percent=0.05),
                       iaa.AdditiveGaussianNoise(scale=0.1 * 255),
                       iaa.Crop(percent=0.1)
                   ],
                   name="SomeOf"),
        iaa.OneOf(children=[
            iaa.CoarseDropout(p=0.5, size_percent=0.05),
            iaa.AdditiveGaussianNoise(scale=0.1 * 255),
            iaa.Crop(percent=0.1)
        ],
                  name="OneOf"),
        iaa.Sometimes(0.5,
                      iaa.AdditiveGaussianNoise(scale=0.1 * 255),
                      name="Sometimes"),
        iaa.WithColorspace("HSV",
                           children=[iaa.Add(20)],
                           name="WithColorspace"),
        iaa.WithChannels([0], children=[iaa.Add(20)], name="WithChannels"),
        iaa.AddToHueAndSaturation((-20, 20),
                                  per_channel=True,
                                  name="AddToHueAndSaturation"),
        iaa.Noop(name="Noop"),
        iaa.Resize({
            "width": 64,
            "height": 64
        }, name="Resize"),
        iaa.CropAndPad(px=(-8, 8), name="CropAndPad-px"),
        iaa.Pad(px=(0, 8), name="Pad-px"),
        iaa.Crop(px=(0, 8), name="Crop-px"),
        iaa.Crop(percent=(0, 0.1), name="Crop-percent"),
        iaa.Fliplr(0.5, name="Fliplr"),
        iaa.Flipud(0.5, name="Flipud"),
        iaa.Superpixels(p_replace=0.75, n_segments=50, name="Superpixels"),
        iaa.Grayscale(0.5, name="Grayscale0.5"),
        iaa.Grayscale(1.0, name="Grayscale1.0"),
        iaa.GaussianBlur((0, 3.0), name="GaussianBlur"),
        iaa.AverageBlur(k=(3, 11), name="AverageBlur"),
        iaa.MedianBlur(k=(3, 11), name="MedianBlur"),
        iaa.BilateralBlur(d=10, name="BilateralBlur"),
        iaa.Sharpen(alpha=(0.1, 1.0), lightness=(0, 2.0), name="Sharpen"),
        iaa.Emboss(alpha=(0.1, 1.0), strength=(0, 2.0), name="Emboss"),
        iaa.EdgeDetect(alpha=(0.1, 1.0), name="EdgeDetect"),
        iaa.DirectedEdgeDetect(alpha=(0.1, 1.0),
                               direction=(0, 1.0),
                               name="DirectedEdgeDetect"),
        iaa.Add((-50, 50), name="Add"),
        iaa.Add((-50, 50), per_channel=True, name="AddPerChannel"),
        iaa.AddElementwise((-50, 50), name="AddElementwise"),
        iaa.AdditiveGaussianNoise(loc=0,
                                  scale=(0.0, 0.1 * 255),
                                  name="AdditiveGaussianNoise"),
        iaa.Multiply((0.5, 1.5), name="Multiply"),
        iaa.Multiply((0.5, 1.5), per_channel=True, name="MultiplyPerChannel"),
        iaa.MultiplyElementwise((0.5, 1.5), name="MultiplyElementwise"),
        iaa.Dropout((0.0, 0.1), name="Dropout"),
        iaa.CoarseDropout(p=0.05,
                          size_percent=(0.05, 0.5),
                          name="CoarseDropout"),
        iaa.Invert(p=0.5, name="Invert"),
        iaa.Invert(p=0.5, per_channel=True, name="InvertPerChannel"),
        iaa.ContrastNormalization(alpha=(0.5, 2.0),
                                  name="ContrastNormalization"),
        iaa.SaltAndPepper(p=0.05, name="SaltAndPepper"),
        iaa.Salt(p=0.05, name="Salt"),
        iaa.Pepper(p=0.05, name="Pepper"),
        iaa.CoarseSaltAndPepper(p=0.05,
                                size_percent=(0.01, 0.1),
                                name="CoarseSaltAndPepper"),
        iaa.CoarseSalt(p=0.05, size_percent=(0.01, 0.1), name="CoarseSalt"),
        iaa.CoarsePepper(p=0.05, size_percent=(0.01, 0.1),
                         name="CoarsePepper"),
        iaa.Affine(scale={
            "x": (0.8, 1.2),
            "y": (0.8, 1.2)
        },
                   translate_px={
                       "x": (-16, 16),
                       "y": (-16, 16)
                   },
                   rotate=(-45, 45),
                   shear=(-16, 16),
                   order=ia.ALL,
                   cval=(0, 255),
                   mode=ia.ALL,
                   name="Affine"),
        iaa.PiecewiseAffine(scale=0.03,
                            nb_rows=(2, 6),
                            nb_cols=(2, 6),
                            name="PiecewiseAffine"),
        iaa.PerspectiveTransform(scale=0.1, name="PerspectiveTransform"),
        iaa.ElasticTransformation(alpha=(0.5, 8.0),
                                  sigma=1.0,
                                  name="ElasticTransformation"),
        iaa.Alpha(factor=(0.0, 1.0),
                  first=iaa.Add(100),
                  second=iaa.Dropout(0.5),
                  per_channel=False,
                  name="Alpha"),
        iaa.Alpha(factor=(0.0, 1.0),
                  first=iaa.Add(100),
                  second=iaa.Dropout(0.5),
                  per_channel=True,
                  name="AlphaPerChannel"),
        iaa.Alpha(factor=(0.0, 1.0),
                  first=iaa.Affine(rotate=(-45, 45)),
                  per_channel=True,
                  name="AlphaAffine"),
        iaa.AlphaElementwise(factor=(0.0, 1.0),
                             first=iaa.Add(50),
                             second=iaa.ContrastNormalization(2.0),
                             per_channel=False,
                             name="AlphaElementwise"),
        iaa.AlphaElementwise(factor=(0.0, 1.0),
                             first=iaa.Add(50),
                             second=iaa.ContrastNormalization(2.0),
                             per_channel=True,
                             name="AlphaElementwisePerChannel"),
        iaa.AlphaElementwise(factor=(0.0, 1.0),
                             first=iaa.Affine(rotate=(-45, 45)),
                             per_channel=True,
                             name="AlphaElementwiseAffine"),
        iaa.SimplexNoiseAlpha(first=iaa.EdgeDetect(1.0),
                              per_channel=False,
                              name="SimplexNoiseAlpha"),
        iaa.FrequencyNoiseAlpha(first=iaa.EdgeDetect(1.0),
                                per_channel=False,
                                name="FrequencyNoiseAlpha")
    ]

    augmenters.append(
        iaa.Sequential([iaa.Sometimes(0.2, aug.copy()) for aug in augmenters],
                       name="Sequential"))
    augmenters.append(
        iaa.Sometimes(0.5, [aug.copy() for aug in augmenters],
                      name="Sometimes"))

    for augmenter in augmenters:
        if args.only is None or augmenter.name in [
                v.strip() for v in args.only.split(",")
        ]:
            print("Augmenter: %s" % (augmenter.name, ))
            grid = []
            for image, kps, bbs in zip(images, keypoints, bounding_boxes):
                aug_det = augmenter.to_deterministic()
                imgs_aug = aug_det.augment_images(
                    np.tile(image[np.newaxis, ...], (16, 1, 1, 1)))
                kps_aug = aug_det.augment_keypoints([kps] * 16)
                bbs_aug = aug_det.augment_bounding_boxes([bbs] * 16)
                imgs_aug_drawn = [
                    kps_aug_one.draw_on_image(img_aug)
                    for img_aug, kps_aug_one in zip(imgs_aug, kps_aug)
                ]
                imgs_aug_drawn = [
                    bbs_aug_one.draw_on_image(img_aug)
                    for img_aug, bbs_aug_one in zip(imgs_aug_drawn, bbs_aug)
                ]
                grid.append(np.hstack(imgs_aug_drawn))
            ia.imshow(np.vstack(grid))
Example #9
def simple_imgaug_example():
    image_dir_path = dataset_home_dir_path + '/phenotyping/cvppp2017_lsc_lcc_challenge/package/CVPPP2017_LSC_training/training/A1'
    label_dir_path = dataset_home_dir_path + '/phenotyping/cvppp2017_lsc_lcc_challenge/package/CVPPP2017_LSC_training/training/A1'
    images, labels = prepare_dataset(image_dir_path, label_dir_path)

    image_width, image_height = 200, 200

    # FIXME [decide] >> Before or after random transformation?
    # Preprocessing (normalization, standardization, etc).
    images_pp = images.astype(np.float64)
    #images_pp /= 255.0
    images_pp = standardize_samplewise(images_pp)
    #images_pp = standardize_featurewise(images_pp)

    if True:
        augmenter = iaa.SomeOf(
            (1, 2),
            [
                iaa.OneOf([
                    iaa.Affine(
                        scale={
                            'x': (0.8, 1.2),
                            'y': (0.8, 1.2)
                        },  # Scale images to 80-120% of their size, individually per axis.
                        translate_percent={
                            'x': (-0.1, 0.1),
                            'y': (-0.1, 0.1)
                        },  # Translate by -10 to +10 percent (per axis).
                        rotate=(-10, 10),  # Rotate by -10 to +10 degrees.
                        shear=(-5, 5),  # Shear by -5 to +5 degrees.
                        #order=[0, 1],  # Use nearest neighbour or bilinear interpolation (fast).
                        order=0,  # Use nearest neighbour interpolation (fast).
                        #cval=(0, 255),  # If mode is constant, use a cval between 0 and 255.
                        #mode=ia.ALL  # Use any of scikit-image's warping modes (see 2nd image from the top for examples).
                        #mode='edge'  # Use any of scikit-image's warping modes (see 2nd image from the top for examples).
                    ),
                    #iaa.PiecewiseAffine(scale=(0.01, 0.05)),  # Move parts of the image around. Slow.
                    iaa.PerspectiveTransform(scale=(0.01, 0.1)),
                    iaa.ElasticTransformation(
                        alpha=(20.0, 50.0), sigma=(6.5, 8.5)
                    ),  # Move pixels locally around (with random strengths).
                ]),
                iaa.OneOf([
                    iaa.GaussianBlur(sigma=(
                        0,
                        3.0)),  # Blur images with a sigma between 0 and 3.0.
                    iaa.AverageBlur(
                        k=(2, 7)
                    ),  # Blur image using local means with kernel sizes between 2 and 7.
                    iaa.MedianBlur(
                        k=(3, 11)
                    ),  # Blur image using local medians with kernel sizes between 3 and 11.
                    iaa.MotionBlur(k=(5, 11),
                                   angle=(0, 360),
                                   direction=(-1.0, 1.0),
                                   order=1),
                ]),
                iaa.OneOf([
                    iaa.AdditiveGaussianNoise(
                        loc=0, scale=(0.1 * 255, 0.5 * 255),
                        per_channel=False),  # Add Gaussian noise to images.
                    iaa.AdditiveLaplaceNoise(loc=0,
                                             scale=(0.1 * 255, 0.4 * 255),
                                             per_channel=False),
                    iaa.AdditivePoissonNoise(lam=(32, 96), per_channel=False),
                    iaa.CoarseSaltAndPepper(p=(0.1, 0.3),
                                            size_percent=(0.2, 0.9),
                                            per_channel=False),
                    iaa.CoarseSalt(p=(0.1, 0.3),
                                   size_percent=(0.2, 0.9),
                                   per_channel=False),
                    iaa.CoarsePepper(p=(0.1, 0.3),
                                     size_percent=(0.2, 0.9),
                                     per_channel=False),
                    iaa.CoarseDropout(p=(0.1, 0.3),
                                      size_percent=(0.05, 0.3),
                                      per_channel=False),
                ]),
                iaa.OneOf([
                    iaa.MultiplyHueAndSaturation(mul=(-10, 10),
                                                 per_channel=False),
                    iaa.AddToHueAndSaturation(value=(-255, 255),
                                              per_channel=False),
                    iaa.LinearContrast(
                        alpha=(0.5, 1.5),
                        per_channel=False),  # Improve or worsen the contrast.
                    iaa.Invert(p=1,
                               per_channel=False),  # Invert color channels.
                    iaa.Sharpen(alpha=(0, 1.0),
                                lightness=(0.75, 1.5)),  # Sharpen images.
                    iaa.Emboss(alpha=(0, 1.0),
                               strength=(0, 2.0)),  # Emboss images.
                ]),
            ],
            random_order=True)
    elif False:
        augmenter = iaa.Sequential(
            [
                # Apply the following augmenters to most images.
                iaa.Fliplr(0.5),  # Horizontally flip 50% of all images.
                iaa.Flipud(0.2),  # Vertically flip 20% of all images.
                # Crop images by -5% to 10% of their height/width.
                iaa.Sometimes(
                    0.5,
                    iaa.CropAndPad(percent=(-0.05, 0.1),
                                   pad_mode=ia.ALL,
                                   pad_cval=(0, 255))),
                iaa.Sometimes(
                    0.5,
                    iaa.Affine(
                        scale={
                            'x': (0.8, 1.2),
                            'y': (0.8, 1.2)
                        },  # Scale images to 80-120% of their size, individually per axis.
                        translate_percent={
                            'x': (-0.2, 0.2),
                            'y': (-0.2, 0.2)
                        },  # Translate by -20 to +20 percent (per axis).
                        rotate=(-45, 45),  # Rotate by -45 to +45 degrees.
                        shear=(-16, 16),  # Shear by -16 to +16 degrees.
                        order=[
                            0,
                            1
                        ],  # Use nearest neighbour or bilinear interpolation (fast).
                        cval=(
                            0, 255
                        ),  # If mode is constant, use a cval between 0 and 255.
                        mode=ia.ALL  # Use any of scikit-image's warping modes (see 2nd image from the top for examples).
                    )),
                # Execute 0 to 5 of the following (less important) augmenters per image.
                # Don't execute all of them, as that would often be way too strong.
                iaa.SomeOf(
                    (0, 5),
                    [
                        iaa.Sometimes(
                            0.5,
                            iaa.Superpixels(p_replace=(0, 1.0),
                                            n_segments=(20, 200))
                        ),  # Convert images into their superpixel representation.
                        iaa.OneOf([
                            iaa.GaussianBlur(
                                (0, 3.0)
                            ),  # Blur images with a sigma between 0 and 3.0.
                            iaa.AverageBlur(
                                k=(2, 7)
                            ),  # Blur image using local means with kernel sizes between 2 and 7.
                            iaa.MedianBlur(
                                k=(3, 11)
                            ),  # Blur image using local medians with kernel sizes between 3 and 11.
                        ]),
                        iaa.Sharpen(alpha=(0, 1.0),
                                    lightness=(0.75, 1.5)),  # Sharpen images.
                        iaa.Emboss(alpha=(0, 1.0),
                                   strength=(0, 2.0)),  # Emboss images.
                        # Search either for all edges or for directed edges, blend the result with the original image using a blobby mask.
                        iaa.SimplexNoiseAlpha(
                            iaa.OneOf([
                                iaa.EdgeDetect(alpha=(0.5, 1.0)),
                                iaa.DirectedEdgeDetect(alpha=(0.5, 1.0),
                                                       direction=(0.0, 1.0)),
                            ])),
                        iaa.AdditiveGaussianNoise(
                            loc=0, scale=(0.0, 0.05 * 255),
                            per_channel=0.5),  # Add gaussian noise to images.
                        iaa.OneOf([
                            iaa.Dropout(
                                (0.01, 0.1), per_channel=0.5
                            ),  # Randomly remove up to 10% of the pixels.
                            iaa.CoarseDropout((0.03, 0.15),
                                              size_percent=(0.02, 0.05),
                                              per_channel=0.2),
                        ]),
                        iaa.Invert(0.05,
                                   per_channel=True),  # Invert color channels.
                        iaa.Add(
                            (-10, 10), per_channel=0.5
                        ),  # Change brightness of images (by -10 to 10 of original value).
                        iaa.AddToHueAndSaturation(
                            (-20, 20)),  # Change hue and saturation.
                        # Either change the brightness of the whole image (sometimes per channel) or change the brightness of subareas.
                        iaa.OneOf([
                            iaa.Multiply((0.5, 1.5), per_channel=0.5),
                            iaa.FrequencyNoiseAlpha(
                                exponent=(-4, 0),
                                first=iaa.Multiply(
                                    (0.5, 1.5), per_channel=True),
                                second=iaa.ContrastNormalization((0.5, 2.0)))
                        ]),
                        iaa.ContrastNormalization(
                            (0.5, 2.0), per_channel=0.5
                        ),  # Improve or worsen the contrast.
                        iaa.Grayscale(alpha=(0.0, 1.0)),
                        iaa.Sometimes(
                            0.5,
                            iaa.ElasticTransformation(alpha=(0.5, 3.5),
                                                      sigma=0.25)
                        ),  # Move pixels locally around (with random strengths).
                        iaa.Sometimes(
                            0.5, iaa.PiecewiseAffine(scale=(0.01, 0.05))
                        ),  # Sometimes move parts of the image around.
                        iaa.Sometimes(
                            0.5, iaa.PerspectiveTransform(scale=(0.01, 0.1)))
                    ],
                    random_order=True)
            ],
            random_order=True)
    else:
        augmenter = iaa.Sequential([
            iaa.SomeOf(
                1,
                [
                    #iaa.Sometimes(0.5, iaa.Crop(px=(0, 100))),  # Crop images from each side by 0 to 100px (randomly chosen).
                    iaa.Sometimes(0.5, iaa.Crop(percent=(
                        0,
                        0.1))),  # Crop images by 0-10% of their height/width.
                    iaa.Fliplr(0.5),  # Horizontally flip 50% of the images.
                    iaa.Flipud(0.5),  # Vertically flip 50% of the images.
                    iaa.Sometimes(
                        0.5,
                        iaa.Affine(
                            scale={
                                'x': (0.8, 1.2),
                                'y': (0.8, 1.2)
                            },  # Scale images to 80-120% of their size, individually per axis.
                            translate_percent={
                                'x': (-0.2, 0.2),
                                'y': (-0.2, 0.2)
                            },  # Translate by -20 to +20 percent (per axis).
                            rotate=(-45, 45),  # Rotate by -45 to +45 degrees.
                            shear=(-16, 16),  # Shear by -16 to +16 degrees.
                            #order=[0, 1],  # Use nearest neighbour or bilinear interpolation (fast).
                            order=0,  # Use nearest neighbour interpolation (fast).
                            #cval=(0, 255),  # If mode is constant, use a cval between 0 and 255.
                            #mode=ia.ALL  # Use any of scikit-image's warping modes (see 2nd image from the top for examples).
                            #mode='edge'  # Use any of scikit-image's warping modes (see 2nd image from the top for examples).
                        )),
                    iaa.Sometimes(0.5, iaa.GaussianBlur(
                        sigma=(0,
                               3.0)))  # Blur images with a sigma of 0 to 3.0.
                ]),
            iaa.Scale(size={
                'height': image_height,
                'width': image_width
            })  # Resize.
        ])

    for idx in range(images.shape[0]):
        images_pp[idx] = (images_pp[idx] - np.min(images_pp[idx])) / (
            np.max(images_pp[idx]) - np.min(images_pp[idx])) * 255
    images_pp = images_pp.astype(np.uint8)

    # Test 1 (good).
    augmenter_det = augmenter.to_deterministic(
    )  # Call this for each batch again, NOT only once at the start.
    #images_aug1 = augmenter_det.augment_images(images)
    images_aug1 = augmenter_det.augment_images(images_pp)
    labels_aug1 = augmenter_det.augment_images(labels)
    augmenter_det = augmenter.to_deterministic(
    )  # Call this for each batch again, NOT only once at the start.
    #images_aug2 = augmenter_det.augment_images(images)
    images_aug2 = augmenter_det.augment_images(images_pp)
    labels_aug2 = augmenter_det.augment_images(labels)

    #export_images(images, labels, './augmented1/img', '')
    export_images(images_pp, labels, './augmented1/img', '')
    export_images(images_aug1, labels_aug1, './augmented1/img', '_aug1')
    export_images(images_aug2, labels_aug2, './augmented1/img', '_aug2')

    # Test 2 (bad).
    augmenter_det = augmenter.to_deterministic(
    )  # Call this for each batch again, NOT only once at the start.
    #images_aug1 = augmenter_det.augment_images(images)
    images_aug1 = augmenter_det.augment_images(images_pp)
    labels_aug1 = augmenter_det.augment_images(labels)
    #images_aug2 = augmenter_det.augment_images(images)
    images_aug2 = augmenter_det.augment_images(images_pp)
    labels_aug2 = augmenter_det.augment_images(labels)

    #export_images(images, labels, './augmented2/img', '')
    export_images(images_pp, labels, './augmented2/img', '')
    export_images(images_aug1, labels_aug1, './augmented2/img', '_aug1')
    export_images(images_aug2, labels_aug2, './augmented2/img', '_aug2')

    print('*********************************', images_pp.dtype)
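
A minimal sketch of the per-batch pattern demonstrated by "Test 1 (good)" above (shapes and the Fliplr/Affine choice are illustrative assumptions, not taken from the source): freezing the augmenter once per batch replays the same random transforms on the images and on their label maps, while re-freezing for every new batch keeps the randomness fresh.

import numpy as np
import imgaug.augmenters as iaa

images = np.random.randint(0, 255, size=(4, 128, 128, 3), dtype=np.uint8)
labels = np.random.randint(0, 2, size=(4, 128, 128, 1), dtype=np.uint8)
augmenter = iaa.Sequential([iaa.Fliplr(0.5),
                            iaa.Affine(rotate=(-45, 45), order=0)])

for _ in range(10):  # one iteration per batch
    det = augmenter.to_deterministic()        # re-freeze for EVERY batch, not only once at the start
    images_aug = det.augment_images(images)   # random transforms are sampled here ...
    labels_aug = det.augment_images(labels)   # ... and replayed identically on the labels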
def create_augmenters(height, width, height_augmentable, width_augmentable,
                      only_augmenters):
    def lambda_func_images(images, random_state, parents, hooks):
        return images

    def lambda_func_heatmaps(heatmaps, random_state, parents, hooks):
        return heatmaps

    def lambda_func_keypoints(keypoints, random_state, parents, hooks):
        return keypoints

    def assertlambda_func_images(images, random_state, parents, hooks):
        return True

    def assertlambda_func_heatmaps(heatmaps, random_state, parents, hooks):
        return True

    def assertlambda_func_keypoints(keypoints, random_state, parents, hooks):
        return True

    augmenters_meta = [
        iaa.Sequential([iaa.Noop(), iaa.Noop()],
                       random_order=False,
                       name="Sequential_2xNoop"),
        iaa.Sequential([iaa.Noop(), iaa.Noop()],
                       random_order=True,
                       name="Sequential_2xNoop_random_order"),
        iaa.SomeOf((1, 3),
                   [iaa.Noop(), iaa.Noop(), iaa.Noop()],
                   random_order=False,
                   name="SomeOf_3xNoop"),
        iaa.SomeOf((1, 3),
                   [iaa.Noop(), iaa.Noop(), iaa.Noop()],
                   random_order=True,
                   name="SomeOf_3xNoop_random_order"),
        iaa.OneOf([iaa.Noop(), iaa.Noop(), iaa.Noop()], name="OneOf_3xNoop"),
        iaa.Sometimes(0.5, iaa.Noop(), name="Sometimes_Noop"),
        iaa.WithChannels([1, 2], iaa.Noop(), name="WithChannels_1_and_2_Noop"),
        iaa.Noop(name="Noop"),
        iaa.Lambda(func_images=lambda_func_images,
                   func_heatmaps=lambda_func_heatmaps,
                   func_keypoints=lambda_func_keypoints,
                   name="Lambda"),
        iaa.AssertLambda(func_images=assertlambda_func_images,
                         func_heatmaps=assertlambda_func_heatmaps,
                         func_keypoints=assertlambda_func_keypoints,
                         name="AssertLambda"),
        iaa.AssertShape((None, height_augmentable, width_augmentable, None),
                        name="AssertShape"),
        iaa.ChannelShuffle(0.5, name="ChannelShuffle")
    ]
    augmenters_arithmetic = [
        iaa.Add((-10, 10), name="Add"),
        iaa.AddElementwise((-10, 10), name="AddElementwise"),
        #iaa.AddElementwise((-500, 500), name="AddElementwise"),
        iaa.AdditiveGaussianNoise(scale=(5, 10), name="AdditiveGaussianNoise"),
        iaa.AdditiveLaplaceNoise(scale=(5, 10), name="AdditiveLaplaceNoise"),
        iaa.AdditivePoissonNoise(lam=(1, 5), name="AdditivePoissonNoise"),
        iaa.Multiply((0.5, 1.5), name="Multiply"),
        iaa.MultiplyElementwise((0.5, 1.5), name="MultiplyElementwise"),
        iaa.Dropout((0.01, 0.05), name="Dropout"),
        iaa.CoarseDropout((0.01, 0.05),
                          size_percent=(0.01, 0.1),
                          name="CoarseDropout"),
        iaa.ReplaceElementwise((0.01, 0.05), (0, 255),
                               name="ReplaceElementwise"),
        #iaa.ReplaceElementwise((0.95, 0.99), (0, 255), name="ReplaceElementwise"),
        iaa.SaltAndPepper((0.01, 0.05), name="SaltAndPepper"),
        iaa.ImpulseNoise((0.01, 0.05), name="ImpulseNoise"),
        iaa.CoarseSaltAndPepper((0.01, 0.05),
                                size_percent=(0.01, 0.1),
                                name="CoarseSaltAndPepper"),
        iaa.Salt((0.01, 0.05), name="Salt"),
        iaa.CoarseSalt((0.01, 0.05),
                       size_percent=(0.01, 0.1),
                       name="CoarseSalt"),
        iaa.Pepper((0.01, 0.05), name="Pepper"),
        iaa.CoarsePepper((0.01, 0.05),
                         size_percent=(0.01, 0.1),
                         name="CoarsePepper"),
        iaa.Invert(0.1, name="Invert"),
        # ContrastNormalization
        iaa.JpegCompression((50, 99), name="JpegCompression")
    ]
    augmenters_blend = [
        iaa.Alpha((0.01, 0.99), iaa.Noop(), name="Alpha"),
        iaa.AlphaElementwise((0.01, 0.99), iaa.Noop(),
                             name="AlphaElementwise"),
        iaa.SimplexNoiseAlpha(iaa.Noop(), name="SimplexNoiseAlpha"),
        iaa.FrequencyNoiseAlpha((-2.0, 2.0),
                                iaa.Noop(),
                                name="FrequencyNoiseAlpha")
    ]
    augmenters_blur = [
        iaa.GaussianBlur(sigma=(1.0, 5.0), name="GaussianBlur"),
        iaa.AverageBlur(k=(3, 11), name="AverageBlur"),
        iaa.MedianBlur(k=(3, 11), name="MedianBlur"),
        iaa.BilateralBlur(d=(3, 11), name="BilateralBlur"),
        iaa.MotionBlur(k=(3, 11), name="MotionBlur")
    ]
    augmenters_color = [
        # InColorspace (deprecated)
        iaa.WithColorspace(to_colorspace="HSV",
                           children=iaa.Noop(),
                           name="WithColorspace"),
        iaa.WithHueAndSaturation(children=iaa.Noop(),
                                 name="WithHueAndSaturation"),
        iaa.MultiplyHueAndSaturation((0.8, 1.2),
                                     name="MultiplyHueAndSaturation"),
        iaa.MultiplyHue((-1.0, 1.0), name="MultiplyHue"),
        iaa.MultiplySaturation((0.8, 1.2), name="MultiplySaturation"),
        iaa.AddToHueAndSaturation((-10, 10), name="AddToHueAndSaturation"),
        iaa.AddToHue((-10, 10), name="AddToHue"),
        iaa.AddToSaturation((-10, 10), name="AddToSaturation"),
        iaa.ChangeColorspace(to_colorspace="HSV", name="ChangeColorspace"),
        iaa.Grayscale((0.01, 0.99), name="Grayscale"),
        iaa.KMeansColorQuantization((2, 16), name="KMeansColorQuantization"),
        iaa.UniformColorQuantization((2, 16), name="UniformColorQuantization")
    ]
    augmenters_contrast = [
        iaa.GammaContrast(gamma=(0.5, 2.0), name="GammaContrast"),
        iaa.SigmoidContrast(gain=(5, 20),
                            cutoff=(0.25, 0.75),
                            name="SigmoidContrast"),
        iaa.LogContrast(gain=(0.7, 1.0), name="LogContrast"),
        iaa.LinearContrast((0.5, 1.5), name="LinearContrast"),
        iaa.AllChannelsCLAHE(clip_limit=(2, 10),
                             tile_grid_size_px=(3, 11),
                             name="AllChannelsCLAHE"),
        iaa.CLAHE(clip_limit=(2, 10),
                  tile_grid_size_px=(3, 11),
                  to_colorspace="HSV",
                  name="CLAHE"),
        iaa.AllChannelsHistogramEqualization(
            name="AllChannelsHistogramEqualization"),
        iaa.HistogramEqualization(to_colorspace="HSV",
                                  name="HistogramEqualization"),
    ]
    augmenters_convolutional = [
        iaa.Convolve(np.float32([[0, 0, 0], [0, 1, 0], [0, 0, 0]]),
                     name="Convolve_3x3"),
        iaa.Sharpen(alpha=(0.01, 0.99), lightness=(0.5, 2), name="Sharpen"),
        iaa.Emboss(alpha=(0.01, 0.99), strength=(0, 2), name="Emboss"),
        iaa.EdgeDetect(alpha=(0.01, 0.99), name="EdgeDetect"),
        iaa.DirectedEdgeDetect(alpha=(0.01, 0.99), name="DirectedEdgeDetect")
    ]
    augmenters_edges = [iaa.Canny(alpha=(0.01, 0.99), name="Canny")]
    augmenters_flip = [
        iaa.Fliplr(1.0, name="Fliplr"),
        iaa.Flipud(1.0, name="Flipud")
    ]
    augmenters_geometric = [
        iaa.Affine(scale=(0.9, 1.1),
                   translate_percent={
                       "x": (-0.05, 0.05),
                       "y": (-0.05, 0.05)
                   },
                   rotate=(-10, 10),
                   shear=(-10, 10),
                   order=0,
                   mode="constant",
                   cval=(0, 255),
                   name="Affine_order_0_constant"),
        iaa.Affine(scale=(0.9, 1.1),
                   translate_percent={
                       "x": (-0.05, 0.05),
                       "y": (-0.05, 0.05)
                   },
                   rotate=(-10, 10),
                   shear=(-10, 10),
                   order=1,
                   mode="constant",
                   cval=(0, 255),
                   name="Affine_order_1_constant"),
        iaa.Affine(scale=(0.9, 1.1),
                   translate_percent={
                       "x": (-0.05, 0.05),
                       "y": (-0.05, 0.05)
                   },
                   rotate=(-10, 10),
                   shear=(-10, 10),
                   order=3,
                   mode="constant",
                   cval=(0, 255),
                   name="Affine_order_3_constant"),
        iaa.Affine(scale=(0.9, 1.1),
                   translate_percent={
                       "x": (-0.05, 0.05),
                       "y": (-0.05, 0.05)
                   },
                   rotate=(-10, 10),
                   shear=(-10, 10),
                   order=1,
                   mode="edge",
                   cval=(0, 255),
                   name="Affine_order_1_edge"),
        iaa.Affine(scale=(0.9, 1.1),
                   translate_percent={
                       "x": (-0.05, 0.05),
                       "y": (-0.05, 0.05)
                   },
                   rotate=(-10, 10),
                   shear=(-10, 10),
                   order=1,
                   mode="constant",
                   cval=(0, 255),
                   backend="skimage",
                   name="Affine_order_1_constant_skimage"),
        # TODO AffineCv2
        iaa.PiecewiseAffine(scale=(0.01, 0.05),
                            nb_rows=4,
                            nb_cols=4,
                            order=1,
                            mode="constant",
                            name="PiecewiseAffine_4x4_order_1_constant"),
        iaa.PiecewiseAffine(scale=(0.01, 0.05),
                            nb_rows=4,
                            nb_cols=4,
                            order=0,
                            mode="constant",
                            name="PiecewiseAffine_4x4_order_0_constant"),
        iaa.PiecewiseAffine(scale=(0.01, 0.05),
                            nb_rows=4,
                            nb_cols=4,
                            order=1,
                            mode="edge",
                            name="PiecewiseAffine_4x4_order_1_edge"),
        iaa.PiecewiseAffine(scale=(0.01, 0.05),
                            nb_rows=8,
                            nb_cols=8,
                            order=1,
                            mode="constant",
                            name="PiecewiseAffine_8x8_order_1_constant"),
        iaa.PerspectiveTransform(scale=(0.01, 0.05),
                                 keep_size=False,
                                 name="PerspectiveTransform"),
        iaa.PerspectiveTransform(scale=(0.01, 0.05),
                                 keep_size=True,
                                 name="PerspectiveTransform_keep_size"),
        iaa.ElasticTransformation(
            alpha=(1, 10),
            sigma=(0.5, 1.5),
            order=0,
            mode="constant",
            cval=0,
            name="ElasticTransformation_order_0_constant"),
        iaa.ElasticTransformation(
            alpha=(1, 10),
            sigma=(0.5, 1.5),
            order=1,
            mode="constant",
            cval=0,
            name="ElasticTransformation_order_1_constant"),
        iaa.ElasticTransformation(
            alpha=(1, 10),
            sigma=(0.5, 1.5),
            order=1,
            mode="nearest",
            cval=0,
            name="ElasticTransformation_order_1_nearest"),
        iaa.ElasticTransformation(
            alpha=(1, 10),
            sigma=(0.5, 1.5),
            order=1,
            mode="reflect",
            cval=0,
            name="ElasticTransformation_order_1_reflect"),
        iaa.Rot90((1, 3), keep_size=False, name="Rot90"),
        iaa.Rot90((1, 3), keep_size=True, name="Rot90_keep_size")
    ]
    augmenters_pooling = [
        iaa.AveragePooling(kernel_size=(1, 16),
                           keep_size=False,
                           name="AveragePooling"),
        iaa.AveragePooling(kernel_size=(1, 16),
                           keep_size=True,
                           name="AveragePooling_keep_size"),
        iaa.MaxPooling(kernel_size=(1, 16), keep_size=False,
                       name="MaxPooling"),
        iaa.MaxPooling(kernel_size=(1, 16),
                       keep_size=True,
                       name="MaxPooling_keep_size"),
        iaa.MinPooling(kernel_size=(1, 16), keep_size=False,
                       name="MinPooling"),
        iaa.MinPooling(kernel_size=(1, 16),
                       keep_size=True,
                       name="MinPooling_keep_size"),
        iaa.MedianPooling(kernel_size=(1, 16),
                          keep_size=False,
                          name="MedianPooling"),
        iaa.MedianPooling(kernel_size=(1, 16),
                          keep_size=True,
                          name="MedianPooling_keep_size")
    ]
    augmenters_segmentation = [
        iaa.Superpixels(p_replace=(0.05, 1.0),
                        n_segments=(10, 100),
                        max_size=64,
                        interpolation="cubic",
                        name="Superpixels_max_size_64_cubic"),
        iaa.Superpixels(p_replace=(0.05, 1.0),
                        n_segments=(10, 100),
                        max_size=64,
                        interpolation="linear",
                        name="Superpixels_max_size_64_linear"),
        iaa.Superpixels(p_replace=(0.05, 1.0),
                        n_segments=(10, 100),
                        max_size=128,
                        interpolation="linear",
                        name="Superpixels_max_size_128_linear"),
        iaa.Superpixels(p_replace=(0.05, 1.0),
                        n_segments=(10, 100),
                        max_size=224,
                        interpolation="linear",
                        name="Superpixels_max_size_224_linear"),
        iaa.UniformVoronoi(n_points=(250, 1000), name="UniformVoronoi"),
        iaa.RegularGridVoronoi(n_rows=(16, 31),
                               n_cols=(16, 31),
                               name="RegularGridVoronoi"),
        iaa.RelativeRegularGridVoronoi(n_rows_frac=(0.07, 0.14),
                                       n_cols_frac=(0.07, 0.14),
                                       name="RelativeRegularGridVoronoi"),
    ]
    augmenters_size = [
        iaa.Resize((0.8, 1.2), interpolation="nearest", name="Resize_nearest"),
        iaa.Resize((0.8, 1.2), interpolation="linear", name="Resize_linear"),
        iaa.Resize((0.8, 1.2), interpolation="cubic", name="Resize_cubic"),
        iaa.CropAndPad(percent=(-0.2, 0.2),
                       pad_mode="constant",
                       pad_cval=(0, 255),
                       keep_size=False,
                       name="CropAndPad"),
        iaa.CropAndPad(percent=(-0.2, 0.2),
                       pad_mode="edge",
                       pad_cval=(0, 255),
                       keep_size=False,
                       name="CropAndPad_edge"),
        iaa.CropAndPad(percent=(-0.2, 0.2),
                       pad_mode="constant",
                       pad_cval=(0, 255),
                       name="CropAndPad_keep_size"),
        iaa.Pad(percent=(0.05, 0.2),
                pad_mode="constant",
                pad_cval=(0, 255),
                keep_size=False,
                name="Pad"),
        iaa.Pad(percent=(0.05, 0.2),
                pad_mode="edge",
                pad_cval=(0, 255),
                keep_size=False,
                name="Pad_edge"),
        iaa.Pad(percent=(0.05, 0.2),
                pad_mode="constant",
                pad_cval=(0, 255),
                name="Pad_keep_size"),
        iaa.Crop(percent=(0.05, 0.2), keep_size=False, name="Crop"),
        iaa.Crop(percent=(0.05, 0.2), name="Crop_keep_size"),
        iaa.PadToFixedSize(width=width + 10,
                           height=height + 10,
                           pad_mode="constant",
                           pad_cval=(0, 255),
                           name="PadToFixedSize"),
        iaa.CropToFixedSize(width=width - 10,
                            height=height - 10,
                            name="CropToFixedSize"),
        iaa.KeepSizeByResize(iaa.CropToFixedSize(height=height - 10,
                                                 width=width - 10),
                             interpolation="nearest",
                             name="KeepSizeByResize_CropToFixedSize_nearest"),
        iaa.KeepSizeByResize(iaa.CropToFixedSize(height=height - 10,
                                                 width=width - 10),
                             interpolation="linear",
                             name="KeepSizeByResize_CropToFixedSize_linear"),
        iaa.KeepSizeByResize(iaa.CropToFixedSize(height=height - 10,
                                                 width=width - 10),
                             interpolation="cubic",
                             name="KeepSizeByResize_CropToFixedSize_cubic"),
    ]
    augmenters_weather = [
        iaa.FastSnowyLandscape(lightness_threshold=(100, 255),
                               lightness_multiplier=(1.0, 4.0),
                               name="FastSnowyLandscape"),
        iaa.Clouds(name="Clouds"),
        iaa.Fog(name="Fog"),
        iaa.CloudLayer(intensity_mean=(196, 255),
                       intensity_freq_exponent=(-2.5, -2.0),
                       intensity_coarse_scale=10,
                       alpha_min=0,
                       alpha_multiplier=(0.25, 0.75),
                       alpha_size_px_max=(2, 8),
                       alpha_freq_exponent=(-2.5, -2.0),
                       sparsity=(0.8, 1.0),
                       density_multiplier=(0.5, 1.0),
                       name="CloudLayer"),
        iaa.Snowflakes(name="Snowflakes"),
        iaa.SnowflakesLayer(density=(0.005, 0.075),
                            density_uniformity=(0.3, 0.9),
                            flake_size=(0.2, 0.7),
                            flake_size_uniformity=(0.4, 0.8),
                            angle=(-30, 30),
                            speed=(0.007, 0.03),
                            blur_sigma_fraction=(0.0001, 0.001),
                            name="SnowflakesLayer")
    ]

    augmenters = (augmenters_meta + augmenters_arithmetic + augmenters_blend +
                  augmenters_blur + augmenters_color + augmenters_contrast +
                  augmenters_convolutional + augmenters_edges +
                  augmenters_flip + augmenters_geometric + augmenters_pooling +
                  augmenters_segmentation + augmenters_size +
                  augmenters_weather)

    if only_augmenters is not None:
        augmenters_reduced = []
        for augmenter in augmenters:
            if any([
                    re.search(pattern, augmenter.name)
                    for pattern in only_augmenters
            ]):
                augmenters_reduced.append(augmenter)
        augmenters = augmenters_reduced

    return augmenters
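
A hedged usage sketch for create_augmenters (the image size, regex patterns and batch shape are assumptions; the function above additionally relies on "import re", "import numpy as np" and "import imgaug.augmenters as iaa" at module level):

import numpy as np

batch = np.random.randint(0, 255, size=(8, 64, 64, 3), dtype=np.uint8)
augmenters = create_augmenters(height=64, width=64,
                               height_augmentable=64, width_augmentable=64,
                               only_augmenters=["Fliplr", "GaussianBlur"])  # regex patterns
for augmenter in augmenters:
    batch_aug = augmenter.augment_images(batch)  # shapes stay constant for these two augmenters
    print(augmenter.name, batch_aug.shape)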
aug3 = iaa.Sharpen(alpha=(0.0, 1.0), lightness=(0.75, 2.0))

aug4 = iaa.AdditiveLaplaceNoise(scale=(0, 0.2*255))
aug5 = iaa.AdditivePoissonNoise(lam=(0, 40))  # AdditivePoissonNoise expects `lam`, not `scale`; (0, 40) is an assumed example range
aug6 = iaa.Multiply((0.5, 1.5), per_channel=0.5)
aug7 = iaa.Cutout(nb_iterations=2, size=0.05)

aug8 = iaa.Cutout(fill_mode="constant", size=0.05, cval=255)

aug9 = iaa.Cutout(fill_mode="gaussian", fill_per_channel=True, size=0.05)
aug10 = iaa.Dropout(p=(0, 0.05))
aug11 = iaa.CoarseDropout((0.0, 0.05), size_percent=(0.02, 0.2))
aug12 = iaa.Dropout2d(p=0.05, nb_keep_channels=0)
aug13 = iaa.ImpulseNoise(0.1)
aug14 = iaa.CoarseSaltAndPepper(0.05, size_percent=(0.01, 0.6))
aug15 = iaa.CoarsePepper(0.05, size_percent=(0.01, 0.2))
# aug16 = iaa.Invert(0.25, per_channel=0.4)
# aug17 = iaa.Invert(0.1)
# aug18 = iaa.Solarize(0.05, threshold=(32, 128))
# aug19 = iaa.JpegCompression(compression=(95, 99))
aug20 = iaa.GaussianBlur(sigma=(0.0, 3.0)) # blur
aug21 = iaa.AverageBlur(k=((1, 5), (1, 3)))

aug22 = iaa.MotionBlur(k=3)

# aug23 = iaa.BlendAlpha(
#     (0.0, 0.5),
#     iaa.Affine(rotate=(0, 0)),
#     per_channel=0.5)
# aug24 = iaa.BlendAlpha(
#     (0.0, 0.5),
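
The standalone augmenters above can be chained; a minimal sketch using three of them (the image array is a placeholder):

import numpy as np
import imgaug.augmenters as iaa

image = np.random.randint(0, 255, size=(128, 128, 3), dtype=np.uint8)
pipeline = iaa.Sequential([aug6, aug10, aug20], random_order=True)  # Multiply, Dropout, GaussianBlur
image_aug = pipeline.augment_image(image)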
Exemple #12
0
    def get_aug(self):
        #sometimes_bg = lambda aug: iaa.Sometimes(0.3, aug)
        sometimes_contrast = lambda aug: iaa.Sometimes(0.3, aug)
        sometimes_noise = lambda aug: iaa.Sometimes(0.6, aug)
        sometimes_blur = lambda aug: iaa.Sometimes(0.6, aug)
        sometimes_degrade_quality = lambda aug: iaa.Sometimes(0.9, aug)
        sometimes_blend = lambda aug: iaa.Sometimes(0.2, aug)

        seq = iaa.Sequential(
                [
                # crop some of the images by 0-30% of their height/width
                # Execute 0 to 4 of the following (less important) augmenters per
                    # image. Don't execute all of them, as that would often be way too
                    # strong.
    #             iaa.SomeOf((0, 4),
    #                     [ 
                # change the background color of some of the images choosing any one technique
#                sometimes_bg(iaa.OneOf([
#                            iaa.AddToHueAndSaturation((-60, 60)),
#                            iaa.Multiply((0.6, 1), per_channel=True),
#                            ])),
                # change the contrast of the input images choosing any one technique
                sometimes_contrast(iaa.OneOf([
                            iaa.LinearContrast((0.5,1.5)),
                            iaa.SigmoidContrast(gain=(3, 5), cutoff=(0.4, 0.6)),
                            iaa.CLAHE(tile_grid_size_px=(3, 21)),
                            iaa.GammaContrast((0.5,1.0))
                            ])),

                # add noise to the input images choosing any one technique
                sometimes_noise(iaa.OneOf([
                    iaa.AdditiveGaussianNoise(scale=(3,8)),
                    iaa.CoarseDropout((0.001,0.01), size_percent=0.5),
                    iaa.AdditiveLaplaceNoise(scale=(3,10)),
                    iaa.CoarsePepper((0.001,0.01), size_percent=(0.5)),
                    iaa.AdditivePoissonNoise(lam=(3.0,10.0)),
                    iaa.Pepper((0.001,0.01)),
                    iaa.Snowflakes(),
                    iaa.Dropout(p=0.01, per_channel=0.01),
                    ])),

                # add blurring techniques to the input image
                sometimes_blur(iaa.OneOf([
                    iaa.AverageBlur(k=(3)),
                    iaa.GaussianBlur(sigma=(1.0)),
                    ])),

                # add techniques to degrade the image quality
                sometimes_degrade_quality(iaa.OneOf([
                            iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)),
                            iaa.Sharpen(alpha=(0.5), lightness=(0.75,1.5)),
                            iaa.BlendAlphaSimplexNoise(
                            foreground=iaa.Multiply(iap.Choice([1.5]), per_channel=False)
                            )
                            ])),

                # blend some patterns in the background    
                sometimes_blend(iaa.OneOf([
                            iaa.BlendAlpha(
                            factor=(0.6,0.8),
                            foreground=iaa.Sharpen(1.0, lightness=1),

                            background=iaa.CoarseDropout(p=0.1, size_px=(1, 30))),  # sample the patch size per image; np.random.randint(30) was fixed once at build time and could be 0

                            iaa.BlendAlphaFrequencyNoise(exponent=(-4),
                                       foreground=iaa.Multiply(iap.Choice([0.5]), per_channel=False)
                                       ),
                            iaa.BlendAlphaSimplexNoise(
                            foreground=iaa.Multiply(iap.Choice([0.5]), per_channel=True)
                            )
                      ])), 

                    ])
        return seq
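
A sketch of how the returned pipeline might be used; "dataset" is a hypothetical instance of the class that defines get_aug, and the method body above additionally assumes "imgaug.augmenters as iaa", "imgaug.parameters as iap" and "numpy as np" are imported:

import numpy as np

seq = dataset.get_aug()  # hypothetical owner instance
batch = np.random.randint(0, 255, size=(16, 256, 256, 3), dtype=np.uint8)
batch_aug = seq.augment_images(batch)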
Exemple #13
0
        transformed_image = transform(image=image)['image']

    elif augmentation == 'salt':
        transform = iaa.Salt(0.1)
        transformed_image = transform(image=image)

    elif augmentation == 'coarse_salt':
        transform = iaa.CoarseSalt(0.05, size_percent=(0.01, 0.1))
        transformed_image = transform(image=image)

    elif augmentation == 'pepper':
        transform = iaa.Pepper(0.1)
        transformed_image = transform(image=image)

    elif augmentation == 'coarse_pepper':
        transform = iaa.CoarsePepper(0.05, size_percent=(0.01, 0.1))
        transformed_image = transform(image=image)

    elif augmentation == 'salt_and_pepper':
        transform = iaa.SaltAndPepper(0.1)
        transformed_image = transform(image=image)

    elif augmentation == 'coarse_salt_and_pepper':
        transform = iaa.CoarseSaltAndPepper(0.05, size_percent=(0.01, 0.1))
        transformed_image = transform(image=image)

    elif augmentation == 'impulse_noise':
        transform = iaa.ImpulseNoise(0.1)
        transformed_image = transform(image=image)

    elif augmentation == 'replace_elementwise':
Exemple #14
0
    image_data = []
    idx = 0
    from_idx = 0

    aug1 = iaa.SomeOf(
        (0, 1),
        [
            # iaa.Grayscale(alpha=(0, 1.0)),
            iaa.GammaContrast(per_channel=True, gamma=(0, 1.75))
        ])
    aug2 = iaa.SomeOf(
        (2, 7),
        [
            iaa.Crop(px=(0, 21)),
            iaa.CoarsePepper(p=(0, 0.1), size_percent=(0.3, 0.6)),
            iaa.Dropout(p=(0, 0.2)),
            iaa.SomeOf((1, 3), [
                iaa.GaussianBlur(sigma=(1.2, 5)),
                iaa.AverageBlur(k=(2, 7)),
                iaa.MotionBlur(angle=(72, 288), k=(3, 13))
            ]),
            iaa.AdditiveGaussianNoise(scale=0.01 * 255),
            # iaa.Sharpen(alpha=(0.0, 1.0), lightness=(0.2, 1.0)),
            # iaa.Emboss(alpha=(0.0, 1.0), strength=(0.5, 1.0)),
            iaa.Add((-60, 60)),
            iaa.AddElementwise((-20, 20)),
            iaa.MultiplyElementwise((.5, 2)),
            iaa.ContrastNormalization((0.5, 1.5)),
            iaa.Affine(shear=(-5, 5)),
            iaa.Affine(rotate=(-3, 3))
class AugmentationScheme:

    # Dictionary containing all possible augmentation functions
    Augmentations = {

        # Convert images to HSV, then increase each pixel's Hue (H), Saturation (S) or Value/lightness (V) [0, 1, 2]
        # value by an amount in between lo and hi:
        "HSV":
        lambda channel, lo, hi: iaa.WithColorspace(
            to_colorspace="HSV",
            from_colorspace="RGB",
            children=iaa.WithChannels(channel, iaa.Add((lo, hi)))),

        # The augmenter first transforms images to HSV color space, then adds random values (lo to hi)
        # to the H and S channels and afterwards converts back to RGB.
        # (independently per channel and the same value for all pixels within that channel)
        "Add_To_Hue_And_Saturation":
        lambda lo, hi: iaa.AddToHueAndSaturation((lo, hi), per_channel=True),

        # Increase each pixel’s channel-value (redness/greenness/blueness) [0, 1, 2] by value in between lo and hi:
        "Increase_Channel":
        lambda channel, lo, hi: iaa.WithChannels(channel, iaa.Add((lo, hi))),
        # Rotate each image’s channel [R=0, G=1, B=2] by value in between lo and hi degrees:
        "Rotate_Channel":
        lambda channel, lo, hi: iaa.WithChannels(channel,
                                                 iaa.Affine(rotate=(lo, hi))),

        # Augmenter that never changes input images (“no operation”).
        "No_Operation":
        iaa.Noop(),

        # Pads images, i.e. adds columns/rows to them. Pads image by value in between lo and hi
        # percent relative to its original size (only accepts positive values in range[0, 1]):
        # If s_i is false, the value will be sampled once per image and used for all sides
        # (i.e. all sides gain/lose the same number of rows/columns)
        # NOTE: automatically resizes images back to their original size after it has augmented them.
        "Pad_Percent":
        lambda lo, hi, s_i: iaa.Pad(
            percent=(lo, hi), keep_size=True, sample_independently=s_i),

        # Pads images by a number of pixels between lo and hi
        # If s_i is false, the value will be sampled once per image and used for all sides
        # (i.e. all sides gain/lose the same number of rows/columns)
        "Pad_Pixels":
        lambda lo, hi, s_i: iaa.Pad(
            px=(lo, hi), keep_size=True, sample_independently=s_i),

        # Crops/cuts away pixels at the sides of the image.
        # Crops images by value in between lo and hi (only accepts positive values in range[0, 1]):
        # If s_i is false, the value will be sampled once per image and used for all sides
        # (i.e. all sides gain/lose the same number of rows/columns)
        # NOTE: automatically resizes images back to their original size after it has augmented them.
        "Crop_Percent":
        lambda lo, hi, s_i: iaa.Crop(
            percent=(lo, hi), keep_size=True, sample_independently=s_i),

        # Crops images by a number of pixels between lo and hi
        # If s_i is false, the value will be sampled once per image and used for all sides
        # (i.e. all sides gain/lose the same number of rows/columns)
        "Crop_Pixels":
        lambda lo, hi, s_i: iaa.Crop(
            px=(lo, hi), keep_size=True, sample_independently=s_i),

        # Flip/mirror percent (i.e 0.5) of the input images horizontally
        # The default probability is 0, so to flip all images, percent=1
        "Flip_lr":
        iaa.Fliplr(1),

        # Flip/mirror percent (i.e 0.5) of the input images vertically
        # The default probability is 0, so to flip all images, percent=1
        "Flip_ud":
        iaa.Flipud(1),

        # Completely or partially transform images to their superpixel representation.
        # Generate s_pix_lo to s_pix_hi superpixels per image. Replace each superpixel with a probability between
        # prob_lo and prob_hi with range[0, 1] (sampled once per image) by its average pixel color.
        "Superpixels":
        lambda prob_lo, prob_hi, s_pix_lo, s_pix_hi: iaa.Superpixels(
            p_replace=(prob_lo, prob_hi), n_segments=(s_pix_lo, s_pix_hi)),

        # Change images to grayscale and overlay them with the original image by varying strengths,
        # effectively removing alpha_lo to alpha_hi of the color:
        "Grayscale":
        lambda alpha_lo, alpha_hi: iaa.Grayscale(alpha=(alpha_lo, alpha_hi)),

        # Blur each image with a gaussian kernel with a sigma between sigma_lo and sigma_hi:
        "Gaussian_Blur":
        lambda sigma_lo, sigma_hi: iaa.GaussianBlur(sigma=(sigma_lo, sigma_hi)
                                                    ),

        # Blur each image using a mean over neighbourhoods that have random sizes,
        # which can vary between h_lo and h_hi in height and w_lo and w_hi in width:
        "Average_Blur":
        lambda h_lo, h_hi, w_lo, w_hi: iaa.AverageBlur(k=((h_lo, h_hi),
                                                          (w_lo, w_hi))),

        # Blur each image using a median over neighbourhoods that have a random size between lo x lo and hi x hi:
        "Median_Blur":
        lambda lo, hi: iaa.MedianBlur(k=(lo, hi)),

        # Sharpen an image, then overlay the results with the original using an alpha between alpha_lo and alpha_hi:
        "Sharpen":
        lambda alpha_lo, alpha_hi, lightness_lo, lightness_hi: iaa.
        Sharpen(alpha=(alpha_lo, alpha_hi),
                lightness=(lightness_lo, lightness_hi)),

        # Emboss an image, then overlay the results with the original using an alpha between alpha_lo and alpha_hi:
        "Emboss":
        lambda alpha_lo, alpha_hi, strength_lo, strength_hi: iaa.Emboss(
            alpha=(alpha_lo, alpha_hi), strength=(strength_lo, strength_hi)),

        # Detect edges in images, turning them into black and white images and
        # then overlay these with the original images using random alphas between alpha_lo and alpha_hi:
        "Detect_Edges":
        lambda alpha_lo, alpha_hi: iaa.EdgeDetect(alpha=(alpha_lo, alpha_hi)),

        # Detect edges having random directions between dir_lo and dir_hi (i.e (0.0, 1.0) = 0 to 360 degrees) in
        # images, turning the images into black and white versions and then overlay these with the original images
        # using random alphas between alpha_lo and alpha_hi:
        "Directed_edge_Detect":
        lambda alpha_lo, alpha_hi, dir_lo, dir_hi: iaa.DirectedEdgeDetect(
            alpha=(alpha_lo, alpha_hi), direction=(dir_lo, dir_hi)),

        # Add random values between lo and hi to images. In percent of all images the values differ per channel
        # (3 sampled values). In the rest of the images the value is the same for all channels:
        "Add":
        lambda lo, hi, percent: iaa.Add((lo, hi), per_channel=percent),

        # Adds random values between lo and hi to images, with each value being sampled per pixel.
        # In percent of all images the values differ per channel (3 sampled values). In the rest of the images
        # the value is the same for all channels:
        "Add_Element_Wise":
        lambda lo, hi, percent: iaa.AddElementwise(
            (lo, hi), per_channel=percent),

        # Add gaussian noise (aka white noise) to an image, sampled once per pixel from a normal
        # distribution N(0, s), where s is sampled per image and varies between lo and hi*255 for percent of all
        # images (sampled once for all channels) and sampled three (RGB) times (channel-wise)
        # for the rest from the same normal distribution:
        "Additive_Gaussian_Noise":
        lambda lo, hi, percent: iaa.AdditiveGaussianNoise(scale=(lo, hi),
                                                          per_channel=percent),

        # Multiply in percent of all images each pixel with random values between lo and hi and multiply
        # the pixels in the rest of the images channel-wise,
        # i.e. sample one multiplier independently per channel and pixel:
        "Multiply":
        lambda lo, hi, percent: iaa.Multiply((lo, hi), per_channel=percent),

        # Multiply values of pixels with possibly different values for neighbouring pixels,
        # making each pixel darker or brighter. Multiply each pixel with a random value between lo and hi:
        "Multiply_Element_Wise":
        lambda lo, hi, percent: iaa.MultiplyElementwise(
            (lo, hi), per_channel=percent),

        # Augmenter that sets a certain fraction of pixels in images to zero.
        # Sample per image a value p from the range lo<=p<=hi and then drop p percent of all pixels in the image
        # (i.e. convert them to black pixels), but do this independently per channel in percent of all images
        "Dropout":
        lambda lo, hi, percent: iaa.Dropout(p=(lo, hi), per_channel=percent),

        # Augmenter that sets rectangular areas within images to zero.
        # Drop d_lo to d_hi percent of all pixels by converting them to black pixels,
        # but do that on a lower-resolution version of the image that has s_lo to s_hi percent of the original size,
        # Also do this in percent of all images channel-wise, so that only the information of some
        # channels is set to 0 while others remain untouched:
        "Coarse_Dropout":
        lambda d_lo, d_hi, s_lo, s_hi, percent: iaa.CoarseDropout(
            (d_lo, d_hi), size_percent=(s_lo, s_hi), per_channel=percent),

        # Augmenter that inverts all values in images, i.e. sets a pixel from value v to 255-v.
        # For c_percent of all images, invert all pixels in these images channel-wise with probability=i_percent
        # (per image). In the rest of the images, invert i_percent of all channels:
        "Invert":
        lambda i_percent, c_percent: iaa.Invert(i_percent,
                                                per_channel=c_percent),

        # Augmenter that changes the contrast of images.
        # Normalize contrast by a factor of lo to hi, sampled randomly per image
        # and for percent of all images also independently per channel:
        "Contrast_Normalisation":
        lambda lo, hi, percent: iaa.ContrastNormalization(
            (lo, hi), per_channel=percent),

        # Scale images to a value of lo to hi percent of their original size but do this independently per axis:
        "Scale":
        lambda x_lo, x_hi, y_lo, y_hi: iaa.Affine(scale={
            "x": (x_lo, x_hi),
            "y": (y_lo, y_hi)
        }),

        # Translate images by lo to hi percent on x-axis and y-axis independently:
        "Translate_Percent":
        lambda x_lo, x_hi, y_lo, y_hi: iaa.Affine(translate_percent={
            "x": (x_lo, x_hi),
            "y": (y_lo, y_hi)
        }),

        # Translate images by lo to hi pixels on x-axis and y-axis independently:
        "Translate_Pixels":
        lambda x_lo, x_hi, y_lo, y_hi: iaa.Affine(translate_px={
            "x": (x_lo, x_hi),
            "y": (y_lo, y_hi)
        }),

        # Rotate images by lo to hi degrees:
        "Rotate":
        lambda lo, hi: iaa.Affine(rotate=(lo, hi)),

        # Shear images by lo to hi degrees:
        "Shear":
        lambda lo, hi: iaa.Affine(shear=(lo, hi)),

        # Augmenter that places a regular grid of points on an image and randomly moves the neighbourhood of
        # these points around via affine transformations. This leads to local distortions.
        # Distort images locally by moving points around, each with a distance v (percent relative to image size),
        # where v is sampled per point from N(0, z) z is sampled per image from the range lo to hi:
        "Piecewise_Affine":
        lambda lo, hi: iaa.PiecewiseAffine(scale=(lo, hi)),

        # Augmenter to transform images by moving pixels locally around using displacement fields.
        # Distort images locally by moving individual pixels around following a distortion field with
        # strength sigma_lo to sigma_hi. The strength of the movement is sampled per pixel from the range
        # alpha_lo to alpha_hi:
        "Elastic_Transformation":
        lambda alpha_lo, alpha_hi, sigma_lo, sigma_hi: iaa.
        ElasticTransformation(alpha=(alpha_lo, alpha_hi),
                              sigma=(sigma_lo, sigma_hi)),

        # Weather augmenters are computationally expensive and will not work effectively on certain data sets

        # Augmenter to draw clouds in images.
        "Clouds":
        iaa.Clouds(),

        # Augmenter to draw fog in images.
        "Fog":
        iaa.Fog(),

        # Augmenter to add falling snowflakes to images.
        "Snowflakes":
        iaa.Snowflakes(),

        # Replaces percent of all pixels in an image by either x or y
        "Replace_Element_Wise":
        lambda percent, x, y: iaa.ReplaceElementwise(percent, [x, y]),

        # Adds laplace noise (somewhere between gaussian and salt-and-pepper noise) to an image, sampled once per pixel
        # from a laplace distribution Laplace(0, s), where s is sampled per image and varies between lo and hi*255 for
        # percent of all images (sampled once for all channels) and sampled three (RGB) times (channel-wise)
        # for the rest from the same laplace distribution:
        "Additive_Laplace_Noise":
        lambda lo, hi, percent: iaa.AdditiveLaplaceNoise(scale=(lo, hi),
                                                         per_channel=percent),

        # Adds poisson noise (similar to gaussian but different distribution) to an image, sampled once per pixel from
        # a poisson distribution Poisson(s), where s is sampled per image and varies between lo and hi for percent of
        # all images (sampled once for all channels) and sampled three (RGB) times (channel-wise)
        # for the rest from the same poisson distribution:
        "Additive_Poisson_Noise":
        lambda lo, hi, percent: iaa.AdditivePoissonNoise(lam=(lo, hi),
                                                         per_channel=percent),

        # Adds salt and pepper noise to an image, i.e. some white-ish and black-ish pixels.
        # Replaces percent of all pixels with salt and pepper noise
        "Salt_And_Pepper":
        lambda percent: iaa.SaltAndPepper(percent),

        # Adds coarse salt and pepper noise to image, i.e. rectangles that contain noisy white-ish and black-ish pixels
        # Replaces percent of all pixels with salt/pepper in an image that has lo to hi percent of the input image size,
        # then upscales the results to the input image size, leading to large rectangular areas being replaced.
        "Coarse_Salt_And_Pepper":
        lambda percent, lo, hi: iaa.CoarseSaltAndPepper(percent,
                                                        size_percent=(lo, hi)),

        # Adds salt noise to an image, i.e white-ish pixels
        # Replaces percent of all pixels with salt noise
        "Salt":
        lambda percent: iaa.Salt(percent),

        # Adds coarse salt noise to image, i.e. rectangles that contain noisy white-ish pixels
        # Replaces percent of all pixels with salt in an image that has lo to hi percent of the input image size,
        # then upscales the results to the input image size, leading to large rectangular areas being replaced.
        "Coarse_Salt":
        lambda percent, lo, hi: iaa.CoarseSalt(percent, size_percent=(lo, hi)),

        # Adds Pepper noise to an image, i.e Black-ish pixels
        # Replaces percent of all pixels with Pepper noise
        "Pepper":
        lambda percent: iaa.Pepper(percent),

        # Adds coarse pepper noise to image, i.e. rectangles that contain noisy black-ish pixels
        # Replaces percent of all pixels with pepper in an image that has lo to hi percent of the input image size,
        # then upscales the results to the input image size, leading to large rectangular areas being replaced.
        "Coarse_Pepper":
        lambda percent, lo, hi: iaa.CoarsePepper(percent,
                                                 size_percent=(lo, hi)),

        # In an alpha blending, two images are naively mixed. E.g. Let A be the foreground image, B be the background
        # image and a is the alpha value. Each pixel intensity is then computed as a * A_ij + (1-a) * B_ij.
        # Images passed in must be a numpy array of type (height, width, channel)
        "Blend_Alpha":
        lambda image_fg, image_bg, alpha: iaa.blend_alpha(
            image_fg, image_bg, alpha),

        # Blur/Denoise an image using a bilateral filter.
        # Bilateral filters blur homogeneous and textured areas, while trying to preserve edges.
        # Blurs all images using a bilateral filter with max distance d_lo to d_hi, with the ranges for sigma_color
        # and sigma_space being defined by sc_lo/sc_hi and ss_lo/ss_hi.
        "Bilateral_Blur":
        lambda d_lo, d_hi, sc_lo, sc_hi, ss_lo, ss_hi: iaa.BilateralBlur(
            d=(d_lo, d_hi),
            sigma_color=(sc_lo, sc_hi),
            sigma_space=(ss_lo, ss_hi)),

        # Augmenter that blurs images in a way that simulates camera motion.
        # Create a motion blur augmenter with kernel size of (kernel x kernel) and a blur angle of either x or y degrees
        # (randomly picked per image).
        "Motion_Blur":
        lambda kernel, x, y: iaa.MotionBlur(k=kernel, angle=[x, y]),

        # Augmenter to apply standard histogram equalization to images (similar to CLAHE)
        "Histogram_Equalization":
        iaa.HistogramEqualization(),

        # Augmenter to perform standard histogram equalization on images, applied to all channels of each input image
        "All_Channels_Histogram_Equalization":
        iaa.AllChannelsHistogramEqualization(),

        # Contrast Limited Adaptive Histogram Equalization (CLAHE). This augmenter applies CLAHE to images, a form of
        # histogram equalization that normalizes within local image patches.
        # Creates a CLAHE augmenter with clip limit uniformly sampled from [cl_lo..cl_hi], i.e. 1 is rather low contrast
        # and 50 is rather high contrast. Kernel sizes of SxS, where S is uniformly sampled from [t_lo..t_hi].
        # Sampling happens once per image. (Note: more parameters are available for further specification)
        "CLAHE":
        lambda cl_lo, cl_hi, t_lo, t_hi: iaa.CLAHE(
            clip_limit=(cl_lo, cl_hi), tile_grid_size_px=(t_lo, t_hi)),

        # Contrast Limited Adaptive Histogram Equalization (refer above), applied to all channels of the input images.
        # CLAHE performs histogram equalization within image patches, i.e. over local neighbourhoods
        "All_Channels_CLAHE":
        lambda cl_lo, cl_hi, t_lo, t_hi: iaa.AllChannelsCLAHE(
            clip_limit=(cl_lo, cl_hi), tile_grid_size_px=(t_lo, t_hi)),

        # Augmenter that changes the contrast of images using a unique formula (using gamma).
        # Multiplier for gamma function is between lo and hi, sampled randomly per image (higher values darken the image).
        # For percent of all images values are sampled independently per channel.
        "Gamma_Contrast":
        lambda lo, hi, percent: iaa.GammaContrast(
            (lo, hi), per_channel=percent),

        # Augmenter that changes the contrast of images using a unique formula (linear).
        # Multiplier for linear function is between lo and hi, sampled randomly per image
        # For percent of all images values are sampled independently per channel.
        "Linear_Contrast":
        lambda lo, hi, percent: iaa.LinearContrast(
            (lo, hi), per_channel=percent),

        # Augmenter that changes the contrast of images using a unique formula (using log).
        # Multiplier for log function is between lo and hi, sampled randomly per image.
        # For percent of all images values are sampled independently per channel.
        # Values around 1.0 lead to a contrast-adjusted images. Values above 1.0 quickly lead to partially broken
        # images due to exceeding the datatype’s value range.
        "Log_Contrast":
        lambda lo, hi, percent: iaa.LogContrast((lo, hi), per_channel=percent),

        # Augmenter that changes the contrast of images using a unique formula (sigmoid).
        # Multiplier for sigmoid function is between lo and hi, sampled randomly per image. c_lo and c_hi decide the
        # cutoff value that shifts the sigmoid function in horizontal direction (Higher values mean that the switch
        # from dark to light pixels happens later, i.e. the pixels will remain darker).
        # For percent of all images values are sampled independently per channel:
        "Sigmoid_Contrast":
        lambda lo, hi, c_lo, c_hi, percent: iaa.SigmoidContrast(
            (lo, hi), (c_lo, c_hi), per_channel=percent),

        # Augmenter that calls a custom (lambda) function for each batch of input images.
        # Extracts Canny Edges from images (refer to description in CO)
        # Good default values for min and max are 100 and 200
        'Custom_Canny_Edges':
        lambda min_val, max_val: iaa.Lambda(func_images=CO.Edges(
            min_value=min_val, max_value=max_val)),
    }

    # AugmentationScheme objects require images and labels.
    # 'augs' is a list that contains all data augmentations in the scheme
    def __init__(self):
        self.augs = [iaa.Flipud(1)]

    def __call__(self, image):
        image = np.array(image)
        aug_scheme = iaa.Sometimes(
            0.5,
            iaa.SomeOf(random.randrange(1,
                                        len(self.augs) + 1),
                       self.augs,
                       random_order=True))
        aug_img = aug_scheme.augment_image(image)
        # Copy into a contiguous array to fix the negative strides left by flip augmenters.
        aug_img = np.ascontiguousarray(aug_img)
        return aug_img
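
A hedged usage sketch of AugmentationScheme: pick entries from the Augmentations dictionary with illustrative parameter values (not values from the source), assign them to augs, then call the scheme on an image; the class itself assumes "import random", "import numpy as np" and "import imgaug.augmenters as iaa" at module level.

import numpy as np

scheme = AugmentationScheme()
scheme.augs = [
    AugmentationScheme.Augmentations["Rotate"](-15, 15),
    AugmentationScheme.Augmentations["Gaussian_Blur"](0.0, 2.0),
    AugmentationScheme.Augmentations["Flip_lr"],
]
image = np.random.randint(0, 255, size=(224, 224, 3), dtype=np.uint8)
image_aug = scheme(image)  # Sometimes(0.5, SomeOf(...)) applied in random order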