Example #1
    def test_get_gaussian_kernels(self):

        sigmas = (1.0, 1.5)
        kernel_size = 3

        # check kernel
        kernel_tfaug = AugmentImg()._get_gaussian_kernels(sigmas, kernel_size)

        kernel_tfa = tfa.image.filters._get_gaussian_kernel(
            sigmas[0], kernel_size)
        gaussian_kernel_x = tf.reshape(kernel_tfa, [1, kernel_size])
        gaussian_kernel_y = tf.reshape(kernel_tfa, [kernel_size, 1])
        kernel_tfa = tfa.image.filters._get_gaussian_kernel_2d(
            gaussian_kernel_y, gaussian_kernel_x)

        assert (kernel_tfa == kernel_tfaug[0]
                ).numpy().all(), 'invalid blur kernel1'

        kernel_tfa = tfa.image.filters._get_gaussian_kernel(
            sigmas[1], kernel_size)
        gaussian_kernel_x = tf.reshape(kernel_tfa, [1, kernel_size])
        gaussian_kernel_y = tf.reshape(kernel_tfa, [kernel_size, 1])
        kernel_tfa = tfa.image.filters._get_gaussian_kernel_2d(
            gaussian_kernel_y, gaussian_kernel_x)

        assert (kernel_tfa == kernel_tfaug[1]
                ).numpy().all(), 'invalid blur kernel2'
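
For reference, the kernels compared above follow the standard separable-Gaussian construction: a 1-D Gaussian sampled at integer offsets, normalized to sum to one, whose outer product with itself gives the 2-D kernel. A minimal NumPy sketch of that construction (an illustrative reference under that assumption, not the library code):

import numpy as np

def gaussian_kernel_2d(sigma, kernel_size):
    # 1-D Gaussian over integer offsets centered at zero, normalized to sum to 1
    x = np.arange(-(kernel_size // 2), kernel_size // 2 + 1, dtype=np.float32)
    k1d = np.exp(-0.5 * (x / sigma) ** 2)
    k1d /= k1d.sum()
    # separable construction: the 2-D kernel is the outer product of the 1-D kernel
    return np.outer(k1d, k1d)

print(gaussian_kernel_2d(1.0, 3))  # 3x3 kernel whose entries sum to 1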
Example #2
    def test_standardization(self):

        im = np.arange(3 * 4 * 5 * 3, dtype=np.uint8).reshape(3, 4, 5, 3)
        tn = tf.Variable(im)

        ret = AugmentImg()._standardization(tn, 3).numpy()

        mean_axis = np.mean(ret, axis=(1, 2, 3))
        std_axis = np.std(ret, axis=(1, 2, 3))

        assert np.allclose(mean_axis, 0), 'standardization failed'
        assert np.allclose(std_axis, 1), 'standardization failed'
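
The operation under test standardizes each sample of the batch independently: it subtracts the per-sample mean and divides by the per-sample standard deviation over the height, width and channel axes. A plain-NumPy sketch of that per-sample normalization (illustrative reference only):

import numpy as np

im = np.arange(3 * 4 * 5 * 3, dtype=np.float32).reshape(3, 4, 5, 3)
mean = im.mean(axis=(1, 2, 3), keepdims=True)  # one mean per sample
std = im.std(axis=(1, 2, 3), keepdims=True)    # one std per sample
standardized = (im - mean) / std

print(standardized.mean(axis=(1, 2, 3)))  # ~0 for every sample
print(standardized.std(axis=(1, 2, 3)))   # ~1 for every sample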
Example #3
    def test_add_blur(self):

        # test image: start from Lenna, then replace it with a checkerboard
        batch_size = 2
        img = np.array(Image.open(DATADIR + 'Lenna.png').resize((50, 50)))
        imgs = np.tile(img, (batch_size, 1, 1, 1))

        # 20x20 checkerboard pattern, repeated over the batch and 3 channels
        imgs = np.array([[(i + j) % 2 for i in range(20)]
                         for j in range(20)]) * 255
        imgs = np.repeat(imgs[np.newaxis, :, :, np.newaxis], batch_size, 0)
        imgs = np.repeat(imgs, 3, 3)
        sigmas = (1.0, 0.5)

        # overwrite the fixed sigmas with one random sigma per batch element
        sigmas = tf.random.uniform([batch_size], 0, 0.5, seed=0)

        kernel_size = 5

        plt.imshow(imgs[1].astype(np.uint8))

        padimg = np.pad(imgs, [[0, 0], [kernel_size // 2, kernel_size // 2],
                               [kernel_size // 2, kernel_size // 2], [0, 0]],
                        mode='symmetric')

        plt.imshow(padimg[0])

        ret = AugmentImg()._add_blur(imgs, sigmas, kernel_size, 3)
        plt.imshow(ret.numpy()[0].astype(np.uint8))
        plt.imshow(ret.numpy()[1].astype(np.uint8))

        kernels = AugmentImg()._get_gaussian_kernels(sigmas, kernel_size)
        kernels = tf.repeat(tf.expand_dims(kernels, 3), 3, 3)

        testval = tf.reduce_sum(padimg[:, 0:kernel_size, 0:kernel_size, :] *
                                kernels,
                                axis=(1, 2))
        assert np.allclose(ret[:, 0, 0, :], testval), 'invalid blur'
        testval = tf.reduce_sum(
            padimg[:, 4:4 + kernel_size, 3:3 + kernel_size, :] * kernels,
            axis=(1, 2))
        assert np.allclose(ret[:, 4, 3, :], testval), 'invalid blur'
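
The two assertions rest on the identity that, after symmetric padding of kernel_size // 2 pixels on each side, the blurred value at (y, x) equals the kernel-weighted sum of the padded window whose top-left corner is (y, x). A small single-channel NumPy sketch of that relation (independent of the library code, with a simple box kernel standing in for the Gaussian):

import numpy as np

def blur_pixel(img2d, kernel, y, x):
    half = kernel.shape[0] // 2
    pad = np.pad(img2d, half, mode='symmetric')  # output keeps the input size
    window = pad[y:y + kernel.shape[0], x:x + kernel.shape[1]]
    return (window * kernel).sum()

img2d = np.random.rand(20, 20).astype(np.float32)
kernel = np.full((5, 5), 1 / 25, dtype=np.float32)  # box kernel for simplicity
print(blur_pixel(img2d, kernel, 4, 3))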
Example #4
def learn_multi_seginout_fromtfds():

    import tensorflow_datasets as tfds
    dataset, info = tfds.load('oxford_iiit_pet:3.*.*', with_info=True)

    BATCH_SIZE = 2
    RESIZE = [214, 214]
    # create dataset from tensorflow_dataset
    augprm = AugmentImg.params(random_crop=[64, 64], random_contrast=[.5, 1.5])

    train = dataset['train']
    valid = dataset['test']

    def prepare_ds(train):
        def resize(dataset):
            img = tf.image.resize(dataset['image'], RESIZE)
            msk = tf.image.resize(dataset['segmentation_mask'], RESIZE)
            return (img, img, msk, msk)

        extracted = train.map(resize).batch(BATCH_SIZE)
        auged = DatasetCreator(10, BATCH_SIZE,
                               **augprm._asdict()).from_dataset(
                                   extracted, 'segmentation', 2)

        def cat(data):
            return ({
                'in1': data['image_in0'],
                'in2': data['image_in1']
            }, tf.concat([data['label_in0'], data['label_in1']], axis=-1))

        return auged.map(cat)

    ds_train = prepare_ds(train)
    ds_valid = prepare_ds(valid)

    # define the model
    model = def_branch_unet(tuple(augprm.random_crop + [3]),
                            tuple(augprm.random_crop + [3]),
                            2)  # 2: inputs and concatenated mask

    model.compile(
        optimizer=tf.keras.optimizers.Adam(0.002),
        loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True),
        metrics=['categorical_accuracy'])

    model.fit(ds_train,
              epochs=10,
              validation_data=ds_valid,
              steps_per_epoch=info.splits['train'].num_examples // BATCH_SIZE,
              validation_steps=info.splits['test'].num_examples // BATCH_SIZE)
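
Before fitting, it can help to pull a single batch from ds_train and confirm the shapes the branch U-Net will receive; a short sketch that could sit just before model.fit (the expected shapes assume the 64x64 random crop is applied to images and masks alike):

    # inspect one augmented batch; the keys 'in1'/'in2' come from the cat() mapping
    inputs, labels = next(iter(ds_train))
    print(inputs['in1'].shape, inputs['in2'].shape)  # expected: (BATCH_SIZE, 64, 64, 3) each
    print(labels.shape)  # expected: (BATCH_SIZE, 64, 64, 2), the two masks concatenated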
Example #5
    def test_cnv2d_minibatchwise(self):

        imgs = np.arange(2 * 5 * 4 * 3, dtype=np.float32).reshape(2, 5, 4, 3)

        kernels = np.arange(2 * 3 * 3 * 1,
                            dtype=np.float32).reshape(2, 3, 3, 1)
        kernels = np.repeat(kernels, 3, axis=3)

        ret = AugmentImg()._cnv2d_minibatchwise(imgs, kernels)

        assert np.allclose((imgs[:, 1:4, 1:4, :] * kernels).sum(axis=(1, 2)),
                           ret[:, 2, 2, :].numpy()), 'calc error1'

        assert np.allclose((imgs[:, 2:5, 0:3, :] * kernels).sum(axis=(1, 2)),
                           ret[:, 3, 1, :].numpy()), 'calc error2'
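
What _cnv2d_minibatchwise is being checked against is a 2-D convolution in which every sample of the minibatch uses its own kernel, applied channel-wise with same-sized output. A loop-based NumPy reference for small inputs (an assumed equivalent; it uses zero padding, so border pixels may differ from the library, while interior pixels such as those asserted above match):

import numpy as np

def conv2d_per_sample(imgs, kernels):
    # imgs: (B, H, W, C), kernels: (B, kH, kW, C) -- one kernel per sample,
    # applied channel-wise; zero padding keeps the output at (B, H, W, C)
    b, h, w, c = imgs.shape
    kh, kw = kernels.shape[1:3]
    pad = np.pad(imgs, [(0, 0), (kh // 2, kh // 2), (kw // 2, kw // 2), (0, 0)])
    out = np.zeros_like(imgs)
    for i in range(b):
        for y in range(h):
            for x in range(w):
                window = pad[i, y:y + kh, x:x + kw, :]
                out[i, y, x, :] = (window * kernels[i]).sum(axis=(0, 1))
    return out

With the arrays from the test, conv2d_per_sample(imgs, kernels)[:, 2, 2, :] reproduces the first expected value above.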
Example #6
    def test_set_seed(self):

        BATCH_SIZE = 2
        img = np.array(Image.open(DATADIR + 'Lenna.png'))
        img1 = np.tile(img, (BATCH_SIZE, 1, 1, 1))
        img2 = img1.copy()

        # data augmentation configurations:
        DATAGEN_CONF = {
            'standardize': False,
            'resize': [100, 100],
            'random_rotation': 5,
            'random_flip_left_right': True,
            'random_flip_up_down': False,
            'random_shift': [25, 25],
            'random_zoom': [0.2, 0.2],
            'random_shear': [5, 5],
            'random_brightness': 0.2,
            'random_hue': 0.00001,
            'random_contrast': [0.6, 1.4],
            'random_crop': None,
            'random_noise': 5,
            'random_saturation': [0.5, 1.5],
            'input_shape': [BATCH_SIZE, 512, 512, 3],
            'num_transforms': 10
        }

        seeds = np.random.uniform(0, 100, (int(1e6)))
        aug1 = AugmentImg(**DATAGEN_CONF, seeds=seeds, training=True)

        aug2 = AugmentImg(**DATAGEN_CONF, seeds=seeds, training=True)

        ret1 = aug1(img1)
        ret2 = aug2(img2)

        assert (ret1 == ret2).numpy().all(), "augmented outputs are not identical"
Example #7
def test_aug_prm(prm, name, testimg, testlbl, dstdir):

    BATCH_SIZE = 10

    with Image.open(testimg).convert('RGB') as img:
        image = np.asarray(img)
    image = np.tile(image, (BATCH_SIZE, 1, 1, 1))

    with Image.open(testlbl).convert('RGB') as label:
        label = np.asarray(label)
    if label.ndim == 2:
        # if the label image has no channel axis, add one
        label = label[:, :, np.newaxis]
    label = np.tile(label, (BATCH_SIZE, 1, 1, 1))
    
    func = AugmentImg(**prm._asdict())

    img, lbl = func(image, label)

    if prm.resize and not prm.random_crop:
        assert img.shape == [BATCH_SIZE] + \
            list(prm.resize) + [3]
        assert lbl.shape == [BATCH_SIZE] + \
            list(prm.resize) + [3]
    elif prm.random_crop or prm.central_crop:
        shape = prm.central_crop or prm.random_crop
        assert img.shape == [BATCH_SIZE] + \
            list(shape) + [3]
        assert lbl.shape == [BATCH_SIZE] + \
            list(shape) + [3]
    else:
        assert img.shape == image.shape
        assert lbl.shape == label.shape

    # adjust the value range for displaying images: cancels the standardize effect.
    # note that this changes the colors
    img = img.numpy()
    lbl = lbl.numpy()
    if prm.standardize:
        img = adjust_img_range(img)
        lbl = adjust_img_range(lbl)
    else:
        img = img.astype(np.uint8)
        lbl = lbl.astype(np.uint8)

    plot_dsresult(((img, lbl),), BATCH_SIZE,
                  1, dstdir + name + '.png',
                  plot_label=True)
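
A hypothetical invocation of this helper, assuming AugmentImg.params accepts the same keywords as the AugmentImg constructor (as Example #4 above suggests), might look like:

# hypothetical parameter set: resize only, no cropping or standardization
prm = AugmentImg.params(resize=[100, 100], standardize=False)
test_aug_prm(prm, 'resize_only', DATADIR + 'Lenna.png', DATADIR + 'Lenna.png', DATADIR)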
Example #8
    def test_central_crop(self):

        # image and lbl which you want to test
        testimg = DATADIR + 'Lenna.png'
        testlbl = DATADIR + 'Lenna.png'

        BATCH_SIZE = 10

        with Image.open(testimg).convert('RGB') as img:
            image = np.asarray(img)
        image = np.tile(image, (BATCH_SIZE, 1, 1, 1))

        with Image.open(testlbl).convert('RGB') as label:
            label = np.asarray(label)
        if label.ndim == 2:
            # if the label image has no channel axis, add one
            label = label[:, :, np.newaxis]
        label = np.tile(label, (BATCH_SIZE, 1, 1, 1))

        training = False

        func = AugmentImg(standardize=False,
                          random_flip_left_right=False,
                          random_flip_up_down=False,
                          random_shift=None,
                          random_zoom=None,
                          random_brightness=False,
                          random_saturation=False,
                          central_crop=[256, 128],
                          training=training,
                          num_transforms=10)

        img, lbl = func(image, label)
        lbl_offset_y = (label.shape[1] - 256) // 2
        lbl_offset_x = (label.shape[2] - 128) // 2  # width axis (equal to shape[1] for the square Lenna image)

        self.assertEqual(img.shape, (10, 256, 128, 3))
        self.assertEqual(lbl.shape, (10, 256, 128, 3))

        self.assertTrue(
            np.allclose(
                lbl.numpy(), label[:, lbl_offset_y:lbl_offset_y + 256,
                                   lbl_offset_x:lbl_offset_x + 128, :]))
        self.assertTrue(
            np.allclose(
                img.numpy(), image[:, lbl_offset_y:lbl_offset_y + 256,
                                   lbl_offset_x:lbl_offset_x + 128, :]))
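
The expected slices in the assertions come from the usual central-crop arithmetic: on each spatial axis the offset is half of what remains after subtracting the crop size. A compact NumPy sketch of that slicing (illustrative only):

import numpy as np

def central_crop(batch, crop_h, crop_w):
    # batch: (B, H, W, C); offsets are half of the remaining border on each axis
    off_y = (batch.shape[1] - crop_h) // 2
    off_x = (batch.shape[2] - crop_w) // 2
    return batch[:, off_y:off_y + crop_h, off_x:off_x + crop_w, :]

batch = np.zeros((10, 512, 512, 3), dtype=np.uint8)
print(central_crop(batch, 256, 128).shape)  # (10, 256, 128, 3)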
Example #9
    def test_tfdata_vertual(self):

        BATCH_SIZE = 10
        image = np.arange(5**3).reshape(5, 5, 5).astype(np.float32)
        image = np.tile(image, (BATCH_SIZE, 1, 1, 1))

        random_zoom = (.1, .1)
        random_shift = (.1, .1)
        random_saturation = None
        training = True
        aug_fun = AugmentImg(standardize=True,
                             random_flip_left_right=True,
                             random_flip_up_down=True,
                             random_shift=random_shift,
                             random_zoom=random_zoom,
                             random_brightness=0.2,
                             random_saturation=random_saturation,
                             training=training,
                             num_transforms=100)

        image = image.astype(np.float32)

        test_cases = {'4dim': image, '3dim': image[:, :, :, 0]}

        for no, case in enumerate(test_cases):
            with self.subTest(case=case):
                image = test_cases[case]

                def py_function(x):
                    return x

                # rebinding aug_fun here replaces the AugmentImg instance
                # created above, so only the tf.data pipeline wiring is tested
                def aug_fun(x):
                    return x

                def func(x):
                    return tf.py_function(py_function, [x], tf.float32)

                ds = tf.data.Dataset.from_tensors(image).map(func).map(aug_fun)

                tf.print('get data')
                img = next(iter(ds.take(1)))
Example #10
    def test_private_functions_in_DatasetCreator(self):

        BATCH_SIZE = 2
        img_org = Image.open(DATADIR + 'Lenna.png')
        shape = list(np.array(img_org).shape)
        fp32 = np.array(Image.open(DATADIR + 'Lenna.png')).astype(
            np.float32) / 256  # scale to [0, 1) floats for the tif input
        Image.fromarray(fp32[:, :, 0]).save(DATADIR + 'Lenna.tif')

        clslabels = list(range(10))
        flist_imgs = [(DATADIR + 'Lenna.png', DATADIR + 'Lenna.tif',
                       DATADIR + 'Lenna.png') for i in range(10)]

        path_tfrecord = DATADIR + 'test_3_inimgs.tfrecord'
        TfrecordConverter().from_path_label(flist_imgs, clslabels,
                                            path_tfrecord)

        dc = DatasetCreator(1, BATCH_SIZE, training=True)
        path_tfrecords = [path_tfrecord, path_tfrecord]
        (ds, num_img, label_type, imgs_dtype, imgs_shape, labels_shape,
         labels_dtype) = dc._get_ds_tfrecord(1, path_tfrecords)

        # test _set_formats
        example_formats = dc._gen_example(label_type, labels_dtype, imgs_dtype,
                                          imgs_shape)
        decoders = dc._decoder_creator(label_type, labels_dtype, labels_shape,
                                       imgs_dtype, imgs_shape)

        assert example_formats['image_in0'].dtype == tf.string
        assert example_formats['image_in1'].dtype == tf.string
        assert example_formats['image_in2'].dtype == tf.string
        assert example_formats['label_in0'].dtype == tf.int64

        ds_decoded = (ds.batch(BATCH_SIZE).apply(
            tf.data.experimental.parse_example_dataset(example_formats)).map(
                decoders))

        # define augmentation
        datagen_confs = {'random_rotation': 5, 'num_transforms': 5}

        inputs_shape, input_label_shape = dc._get_inputs_shapes(
            ds_decoded, label_type, len(imgs_dtype))

        seeds = np.random.uniform(0, 2**32, (int(1e6)))
        if len(imgs_dtype) > 1:  # multiple input
            aug_funs = []
            for shape in inputs_shape:
                datagen_confs['input_shape'] = shape
                aug_funs.append(AugmentImg(**datagen_confs, seeds=seeds))
            if label_type == 'segmentation':
                datagen_confs['input_shape'] = input_label_shape
                aug_funs.append(AugmentImg(**datagen_confs, seeds=seeds))
            elif label_type == 'class':
                aug_funs.append(lambda x: x)

            aug_fun = dc._apply_aug(aug_funs)

        ds_aug = ds_decoded.map(aug_fun)

        ds_out = ds_aug.map(dc._ds_to_dict(example_formats.keys()))
        test_ret = next(iter(ds_out))

        assert test_ret['image_in0'].shape == [BATCH_SIZE, *imgs_shape[0]], \
            "invalid image 0 size"
        assert test_ret['image_in1'].shape == [BATCH_SIZE, *imgs_shape[1]], \
            "invalid image 1 size"
        assert test_ret['image_in2'].shape == [BATCH_SIZE, *imgs_shape[2]], \
            "invalid image 2 size"
        assert test_ret['label_in0'].shape == BATCH_SIZE, "invalid label size"