Example #1
dataset_train = DetectorDataset(image_fps_train, image_annotations, ORIG_SIZE,
                                ORIG_SIZE)
dataset_train.prepare()

# prepare the validation dataset
dataset_val = DetectorDataset(image_fps_val, image_annotations, ORIG_SIZE,
                              ORIG_SIZE)
dataset_val.prepare()

dataset_val_test = DetectorDataset(image_fps_val_test, image_annotations,
                                   ORIG_SIZE, ORIG_SIZE)
dataset_val_test.prepare()

# Image augmentation (light but constant)
from imgaug import augmenters as iaa

augmentation = iaa.Sequential([
    iaa.Fliplr(0.1),
    iaa.OneOf([  ## geometric transform
        iaa.Affine(
            scale={
                "x": (0.98, 1.02),
                "y": (0.98, 1.04)
            },
            translate_percent={
                "x": (-0.02, 0.02),
                "y": (-0.04, 0.04)
            },
            rotate=(-2, 2),
            shear=(-1, 1),
        ),
        iaa.PiecewiseAffine(scale=(0.01, 0.025)),
    ]),
])
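A hedged usage sketch (not part of the original example): with the Matterport Mask R-CNN implementation that these DetectorDataset objects come from, the pipeline above is usually passed to training via the `augmentation` argument; `model` and `config` are assumptions and are not defined in the snippet above.

# Sketch only: `model` is an assumed mrcnn.model.MaskRCNN instance and
# `config` its training config; neither is defined above.
model.train(dataset_train, dataset_val,
            learning_rate=config.LEARNING_RATE,
            epochs=2,
            layers='all',
            augmentation=augmentation)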
Example #2
def augmentation_seq():
    """
    Note imgaug expects RGB not BGR
    :return:
    """
    # TODO this is a reference of transforms that were helpful in keras so use them in imgaug
    # rescale=None,
    # shear_range=0.2,
    # rotation_range=0.2,
    # width_shift_range=0.3,
    # height_shift_range=0.3,
    # zoom_range=[.85, 1.2],
    # fill_mode='nearest',
    # horizontal_flip=False,
    # vertical_flip=False,

    seq_simple = iaa.Sequential([
        iaa.Affine(rotate=(-45, 45))
        # iaa.Crop(px=(1, 16), keep_size=False),
        # iaa.Fliplr(0.5),
        # iaa.GaussianBlur(sigma=(0, 3.0))
    ])

    _sometimes = lambda aug: iaa.Sometimes(0.85, aug)
    seq_complex = iaa.Sequential(
        [
            # apply the following augmenters to most images
            iaa.Fliplr(0.5),  # horizontally flip 50% of all images
            iaa.Flipud(0.2),  # vertically flip 20% of all images
            # crop images by -5% to 10% of their height/width
            # _sometimes(iaa.CropAndPad(
            #     percent=(-0.05, 0.1),
            #     pad_mode=ia.ALL,
            #     pad_cval=(0, 255)
            # )),
            _sometimes(iaa.Resize((0.5, 3))),
            # execute 0 to 5 of the following (less important) augmenters per image
            # don't execute all of them, as that would often be way too strong
            iaa.SomeOf(
                (3, 5),
                [
                    # _sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))),
                    # convert images into their superpixel representation
                    iaa.OneOf([
                        iaa.GaussianBlur((0, 3.0)),
                        # blur images with a sigma between 0 and 3.0
                        iaa.AverageBlur(k=(2, 7)),
                        # blur image using local means with kernel sizes between 2 and 7
                        iaa.MedianBlur(k=(3, 11)),
                        # blur image using local medians with kernel sizes between 3 and 11
                    ]),
                    iaa.Sharpen(alpha=(0, 1.0),
                                lightness=(0.75, 1.5)),  # sharpen images
                    # iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)),  # emboss images
                    # # search either for all edges or for directed edges,
                    # # blend the result with the original image using a blobby mask
                    # iaa.SimplexNoiseAlpha(iaa.OneOf([
                    #     iaa.EdgeDetect(alpha=(0.5, 1.0)),
                    #     iaa.DirectedEdgeDetect(alpha=(0.5, 1.0), direction=(0.0, 1.0)),
                    # ])),
                    # iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
                    # add gaussian noise to images
                    # iaa.OneOf([
                    #     iaa.Dropout((0.01, 0.1), per_channel=0.5),  # randomly remove up to 10% of the pixels
                    #     iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),
                    # ]),
                    # iaa.Invert(0.05, per_channel=True),  # invert color channels
                    # iaa.Add((-10, 10), per_channel=0.5),
                    # change brightness of images (by -10 to 10 of original value)
                    iaa.AddToHueAndSaturation((-5, 5)),  # change hue and saturation
                    # either change the brightness of the whole image (sometimes
                    # per channel) or change the brightness of subareas
                    # iaa.OneOf([
                    #     iaa.Multiply((0.5, 1.5), per_channel=0.5),
                    #     iaa.FrequencyNoiseAlpha(
                    #         exponent=(-1, 0),
                    #         first=iaa.Multiply((0.85, 1.15), per_channel=True),
                    #         second=iaa.LinearContrast((0.8, 1.2))
                    #     )
                    # ]),
                    # iaa.LinearContrast((0.75, 1.25), per_channel=0.2),  # improve or worsen the contrast
                    # iaa.Grayscale(alpha=(0.0, 1.0)),
                    # _sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)),
                    # # move pixels locally around (with random strengths)
                    # _sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))),
                    # # sometimes move parts of the image around
                    # _sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1)))
                ],
                random_order=True),
            _sometimes(
                iaa.Affine(
                    # scale={"x": (0.5, 2.5), "y": (0.5, 2.5)},
                    # scale images to 80-120% of their size, individually per axis
                    # translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},  # translate by -20 to +20 percent (per axis)
                    # shear=(-16, 16),  # shear by -16 to +16 degrees
                    # order=[0, 1],  # use nearest neighbour or bilinear interpolation (fast)
                    # TODO add this back cval=(0, 100),  # if mode is constant, use a cval between 0 and 255
                    rotate=(-45, 45),  # rotate by -45 to +45 degrees
                    # mode=ia.ALL  # use any of scikit-image's warping modes (see 2nd image from the top for examples)
                ), ),
        ],
        random_order=False)
    return seq_complex
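A minimal usage sketch (assumed, not in the original snippet), tying back to the docstring note that imgaug expects RGB: read an image with OpenCV, convert BGR to RGB, then augment. The file name is hypothetical.

import cv2

seq = augmentation_seq()
img_bgr = cv2.imread("sample.jpg")                   # hypothetical input file
img_rgb = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2RGB)   # imgaug expects RGB, not BGR
img_aug = seq.augment_image(img_rgb)                 # apply the complex pipeline once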
Example #3
from imgaug import augmenters as iaa

augmenter = iaa.Sequential(
    [
        iaa.Fliplr(0.5),
        iaa.Crop(px=(
            0,
            12)),  # crop images from each side by 0 to 12px (randomly chosen)
    ],
    random_order=True,
)
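A short usage sketch (an assumption, not in the original example): because Crop uses keep_size=True by default, augmented images keep their original shape.

import numpy as np

images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)
images_aug = augmenter.augment_images(images)  # flip/crop applied per image
assert images_aug.shape == images.shape        # shape preserved by keep_size=True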
Example #4
import numpy as np
import cv2
from imgaug import augmenters as iaa
from keras.utils import to_categorical
from skimage.segmentation import slic


def data_gen(data, batch_size):
    # Get total number of samples in the data
    n = len(data)
    steps = n//batch_size
    
    # Define two numpy arrays for containing batch data and labels
    batch_data = np.zeros((batch_size, 224, 224, 3), dtype=np.float32)
    batch_labels = np.zeros((batch_size,2), dtype=np.float32)

    # Get a numpy array of all the indices of the input data
    indices = np.arange(n)
    
    # Initialize a counter
    i =0
    while True:
        np.random.shuffle(indices)
        # Get the next batch 
        count = 0
        next_batch = indices[(i*batch_size):(i+1)*batch_size]
        for j, idx in enumerate(next_batch):
            img_name = data.iloc[idx]['images']
            label = data.iloc[idx]['labels']
            
            # one hot encoding
            encoded_label = to_categorical(label, num_classes=2)
            # read the image and resize
            img = cv2.imread(str(img_name))
            img = cv2.resize(img, (224,224))
            
            # check if it's grayscale
            if img.shape[2]==1:
                img = np.dstack([img, img, img])
            
            # cv2 reads in BGR mode by default
            orig_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            # normalize the image pixels
            orig_img = orig_img.astype(np.float32)/255.

            # segmentation (SLIC superpixels; the label map is not used below)
            seg_labels = slic(orig_img)
            
            batch_data[count] = orig_img
            batch_labels[count] = encoded_label
            # augmentation
            seq = iaa.OneOf([
                 iaa.Fliplr(),  # horizontal flips
                 iaa.Affine(rotate=20),  # rotation
                 iaa.Multiply((1.2, 1.5))])  # random brightness
            # generating more samples of the undersampled class
            if label==0 and count < batch_size-2:
                aug_img1 = seq.augment_image(img)
                aug_img2 = seq.augment_image(img)
                aug_img1 = cv2.cvtColor(aug_img1, cv2.COLOR_BGR2RGB)
                aug_img2 = cv2.cvtColor(aug_img2, cv2.COLOR_BGR2RGB)
                aug_img1 = aug_img1.astype(np.float32)/255.
                aug_img2 = aug_img2.astype(np.float32)/255.

                batch_data[count+1] = aug_img1
                batch_labels[count+1] = encoded_label
                batch_data[count+2] = aug_img2
                batch_labels[count+2] = encoded_label
                count +=2
            
            else:
                count+=1
            
            if count==batch_size-1:
                break
            
        i+=1
        yield batch_data, batch_labels
            
        if i>=steps:
            i=0
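A hedged sketch of how this generator is typically wired into Keras training; `train_df`, `val_df` (DataFrames with 'images' and 'labels' columns) and `model` are assumptions, not defined above.

# Sketch only: train_df, val_df and model are assumed to exist elsewhere.
batch_size = 16
train_gen = data_gen(train_df, batch_size)
val_gen = data_gen(val_df, batch_size)

model.fit_generator(train_gen,
                    steps_per_epoch=len(train_df) // batch_size,
                    validation_data=val_gen,
                    validation_steps=len(val_df) // batch_size,
                    epochs=10)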
Example #5
from imgaug import augmenters as iaa
import numpy as np
import cv2

images = cv2.imread('00000323_4.PNG')
images = cv2.cvtColor(images, cv2.COLOR_BGR2RGB)

#images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)


seq = iaa.Sequential([iaa.Fliplr(0.5),
                      iaa.GaussianBlur((0, 3.0))])

img_aug = seq.augment_image(images)  # a single image goes through augment_image, not augment_images

cv2.imshow('before', images)
cv2.imshow('aaa', img_aug)
cv2.waitKey(0)
cv2.destroyAllWindows()
dicom = pydicom.dcmread(train_img_path + train_df.index[101] + ".dcm")

fig, ax = plt.subplots(1, 2)

ax[0].imshow(window_testing(dicom, window_without_correction), cmap=plt.cm.bone);
ax[0].set_title("original")
ax[1].imshow(window_testing(dicom, window_with_correction), cmap=plt.cm.bone);
ax[1].set_title("corrected");


# ### Random image augmentation

# Image Augmentation
sometimes = lambda aug: iaa.Sometimes(0.25, aug)
     
augmentation = iaa.Sequential([iaa.Fliplr(0.25),
                               iaa.Flipud(0.10),
                               iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5),
                               iaa.Sometimes(0.5, iaa.GaussianBlur(sigma=(0, 0.5))),
                               # Strengthen or weaken the contrast in each image.
                               iaa.ContrastNormalization((0.75, 1.5)),
                               sometimes(iaa.Crop(px=(0, 25), keep_size=True, sample_independently=False))
                               ], random_order=True)
        


# Generators
class DataGenerator_Train(keras.utils.Sequence):
    def __init__(self, dataset, labels, batch_size = batch_size, image_shape = input_image_shape, image_path = train_img_path, augment = False, *args, **kwargs):
        self.dataset = dataset
        self.ids = dataset.index
        self.labels = labels
def sometimes(aug):
    return iaa.Sometimes(0.7, aug)


def sometimes_p9(aug):
    return iaa.Sometimes(0.9, aug)


def sometimes_p5(aug):
    return iaa.Sometimes(0.5, aug)


simple_seq = iaa.Sequential(
    [
        iaa.Fliplr(0.5),  # horizontally flip 50% of all images
        iaa.Flipud(0.5),  # vertically flip 50% of all images
        sometimes_p5(iaa.Rot90(k=ia.ALL)),
        # sometimes_p5(iaa.Cutout(nb_iterations=2, size=0.05, cval=0)),
    ],
    random_order=True)

seq = iaa.Sequential(
    [
        iaa.Fliplr(0.5),  # horizontally flip 50% of all images
        iaa.Flipud(0.5),  # vertically flip 50% of all images
        sometimes_p5(iaa.Cutout(nb_iterations=5, size=0.05, cval=0)),
        sometimes_p5([
            iaa.CropToFixedSize(width=1024 - 50, height=1024 - 50),
            # iaa.CropToFixedSize(width=900, height=900),
            iaa.Resize((1024, 1024)),
Example #8
def tf_softmax(x):
    return tf.nn.softmax(x)


def unpickle(file):
    import pickle
    with open(file, 'rb') as fo:
        dict = pickle.load(fo, encoding='bytes')
    return dict


# data aug
seq = iaa.Sequential(
    [
        iaa.Fliplr(1.0),  # horizontal flips
        iaa.Affine(scale={
            "x": (0.9, 1.2),
            "y": (0.9, 1.2)
        },
                   translate_percent={
                       "x": (-0.1, 0.1),
                       "y": (-0.1, 0.1)
                   },
                   rotate=(-25, 25),
                   shear=(-4, 4)),
    ],
    random_order=True)  # apply augmenters in random order


# code from: https://github.com/tensorflow/tensorflow/issues/8246
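A hedged sketch (unrelated to the TensorFlow issue linked above) of how unpickle and seq combine on a CIFAR-10 batch file; the path is hypothetical and the reshape follows the CIFAR-10 pickle layout (N, 3072) to (N, 32, 32, 3).

import numpy as np

batch = unpickle("cifar-10-batches-py/data_batch_1")  # hypothetical path
images = batch[b"data"].reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)  # NCHW to NHWC
images_aug = seq.augment_images(images)  # apply flip + affine to the whole batch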
Example #9
    iaa.Sometimes(0.1,
        iaa.Affine(
            translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
        )
    ),
    iaa.Sometimes(0.1,
        iaa.Affine(
            rotate=(-25, 25),
        )
    ),
    iaa.Sometimes(0.1,
        iaa.Affine(
            scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
        )
    ),
    iaa.Fliplr(1.0), # Horizontal flips
], random_order=True) # apply augmenters in random order

# class
class CNN():
    
    def __init__(self,k,inc,out):
        self.w = tf.Variable(tf.random_normal([k,k,inc,out],stddev=0.05))
        self.m,self.v_prev = tf.Variable(tf.zeros_like(self.w)),tf.Variable(tf.zeros_like(self.w))
        self.v_hat_prev = tf.Variable(tf.zeros_like(self.w))
    def getw(self): return self.w

    def feedforward(self,input,stride=1,padding='SAME'):
        self.input  = input
        self.layer  = tf.nn.conv2d(input,self.w,strides=[1,stride,stride,1],padding=padding) 
        self.layerA = tf_elu(self.layer)
Example #10
def test_dtype_preservation():
    reseed()

    size = (4, 16, 16, 3)
    images = [
        np.random.uniform(0, 255, size).astype(np.uint8),
        np.random.uniform(0, 65535, size).astype(np.uint16),
        np.random.uniform(0, 4294967295, size).astype(np.uint32),
        np.random.uniform(-128, 127, size).astype(np.int16),
        np.random.uniform(-32768, 32767, size).astype(np.int32),
        np.random.uniform(0.0, 1.0, size).astype(np.float32),
        np.random.uniform(-1000.0, 1000.0, size).astype(np.float16),
        np.random.uniform(-1000.0, 1000.0, size).astype(np.float32),
        np.random.uniform(-1000.0, 1000.0, size).astype(np.float64)
    ]

    default_dtypes = set([arr.dtype for arr in images])

    # Some dtypes are here removed per augmenter, because the respective
    # augmenter does not support them. This test currently only checks whether
    # dtypes are preserved from in- to output for all dtypes that are supported
    # per augmenter.
    # dtypes are here removed via list comprehension instead of
    # `default_dtypes - set([dtype])`, because the latter one simply never
    # removed the dtype(s) for some reason?!

    def _not_dts(dts):
        return [dt for dt in default_dtypes if dt not in dts]

    augs = [
        (iaa.Add((-5, 5),
                 name="Add"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.AddElementwise((-5, 5), name="AddElementwise"),
         _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.AdditiveGaussianNoise(0.01 * 255, name="AdditiveGaussianNoise"),
         _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.Multiply((0.95, 1.05), name="Multiply"),
         _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.Dropout(0.01, name="Dropout"),
         _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.CoarseDropout(0.01, size_px=6, name="CoarseDropout"),
         _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.Invert(0.01, per_channel=True, name="Invert"), default_dtypes),
        (iaa.ContrastNormalization(
            (0.95, 1.05), name="ContrastNormalization"), default_dtypes),
        (iaa.GaussianBlur(sigma=(0.95, 1.05),
                          name="GaussianBlur"), _not_dts([np.float16])),
        (iaa.AverageBlur((3, 5), name="AverageBlur"),
         _not_dts([np.uint32, np.int32, np.float16])),
        (iaa.MedianBlur((3, 5), name="MedianBlur"),
         _not_dts([np.uint32, np.int32, np.float16, np.float64])),
        (iaa.BilateralBlur((3, 5), name="BilateralBlur"),
         _not_dts([
             np.uint16, np.uint32, np.int16, np.int32, np.float16, np.float64
         ])),
        # WithColorspace ?
        # iaa.AddToHueAndSaturation((-5, 5), name="AddToHueAndSaturation"), # works only with RGB/uint8
        # ChangeColorspace ?
        # iaa.Grayscale((0.0, 0.1), name="Grayscale"), # works only with RGB/uint8
        # Convolve ?
        (iaa.Sharpen((0.0, 0.1), lightness=(1.0, 1.2), name="Sharpen"),
         _not_dts([np.uint32, np.int32, np.float16])),
        (iaa.Emboss(alpha=(0.0, 0.1), strength=(0.5, 1.5), name="Emboss"),
         _not_dts([np.uint32, np.int32, np.float16])),
        (iaa.EdgeDetect(alpha=(0.0, 0.1), name="EdgeDetect"),
         _not_dts([np.uint32, np.int32, np.float16])),
        (iaa.DirectedEdgeDetect(alpha=(0.0, 0.1),
                                direction=0,
                                name="DirectedEdgeDetect"),
         _not_dts([np.uint32, np.int32, np.float16])),
        (iaa.Fliplr(0.5, name="Fliplr"), default_dtypes),
        (iaa.Flipud(0.5, name="Flipud"), default_dtypes),
        (iaa.Affine(translate_px=(-5, 5), name="Affine-translate-px"),
         _not_dts([np.uint32, np.int32])),
        (iaa.Affine(translate_percent=(-0.05, 0.05),
                    name="Affine-translate-percent"),
         _not_dts([np.uint32, np.int32])),
        (iaa.Affine(rotate=(-20, 20),
                    name="Affine-rotate"), _not_dts([np.uint32, np.int32])),
        (iaa.Affine(shear=(-20, 20),
                    name="Affine-shear"), _not_dts([np.uint32, np.int32])),
        (iaa.Affine(scale=(0.9, 1.1),
                    name="Affine-scale"), _not_dts([np.uint32, np.int32])),
        (iaa.PiecewiseAffine(scale=(0.001, 0.005),
                             name="PiecewiseAffine"), default_dtypes),
        # (iaa.PerspectiveTransform(scale=(0.01, 0.10), name="PerspectiveTransform"), not_dts([np.uint32])),
        (iaa.ElasticTransformation(alpha=(0.1, 0.2),
                                   sigma=(0.1, 0.2),
                                   name="ElasticTransformation"),
         _not_dts([np.float16])),
        (iaa.Sequential([iaa.Noop(), iaa.Noop()],
                        name="SequentialNoop"), default_dtypes),
        (iaa.SomeOf(1, [iaa.Noop(), iaa.Noop()],
                    name="SomeOfNoop"), default_dtypes),
        (iaa.OneOf([iaa.Noop(), iaa.Noop()],
                   name="OneOfNoop"), default_dtypes),
        (iaa.Sometimes(0.5, iaa.Noop(), name="SometimesNoop"), default_dtypes),
        (iaa.Sequential([iaa.Add(
            (-5, 5)), iaa.AddElementwise((-5, 5))],
                        name="Sequential"),
         _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.SomeOf(1, [iaa.Add(
            (-5, 5)), iaa.AddElementwise((-5, 5))],
                    name="SomeOf"), _not_dts([np.uint32, np.int32,
                                              np.float64])),
        (iaa.OneOf([iaa.Add(
            (-5, 5)), iaa.AddElementwise((-5, 5))],
                   name="OneOf"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.Sometimes(0.5, iaa.Add((-5, 5)), name="Sometimes"),
         _not_dts([np.uint32, np.int32, np.float64])),
        # WithChannels
        (iaa.Noop(name="Noop"), default_dtypes),
        # Lambda
        # AssertLambda
        # AssertShape
        (iaa.Alpha((0.0, 0.1), iaa.Noop(), name="AlphaNoop"), default_dtypes),
        (iaa.AlphaElementwise((0.0, 0.1),
                              iaa.Noop(),
                              name="AlphaElementwiseNoop"), default_dtypes),
        (iaa.SimplexNoiseAlpha(iaa.Noop(),
                               name="SimplexNoiseAlphaNoop"), default_dtypes),
        (iaa.FrequencyNoiseAlpha(exponent=(-2, 2),
                                 first=iaa.Noop(),
                                 name="SimplexNoiseAlphaNoop"),
         default_dtypes),
        (iaa.Alpha((0.0, 0.1), iaa.Add(10),
                   name="Alpha"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.AlphaElementwise((0.0, 0.1), iaa.Add(10),
                              name="AlphaElementwise"),
         _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.SimplexNoiseAlpha(iaa.Add(10), name="SimplexNoiseAlpha"),
         _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.FrequencyNoiseAlpha(exponent=(-2, 2),
                                 first=iaa.Add(10),
                                 name="SimplexNoiseAlpha"),
         _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.Superpixels(p_replace=0.01, n_segments=64),
         _not_dts([np.float16, np.float32, np.float64])),
        (iaa.Resize({
            "height": 4,
            "width": 4
        }, name="Resize"),
         _not_dts([
             np.uint16, np.uint32, np.int16, np.int32, np.float32, np.float16,
             np.float64
         ])),
        (iaa.CropAndPad(px=(-10, 10), name="CropAndPad"),
         _not_dts([
             np.uint16, np.uint32, np.int16, np.int32, np.float32, np.float16,
             np.float64
         ])),
        (iaa.Pad(px=(0, 10), name="Pad"),
         _not_dts([
             np.uint16, np.uint32, np.int16, np.int32, np.float32, np.float16,
             np.float64
         ])),
        (iaa.Crop(px=(0, 10), name="Crop"),
         _not_dts([
             np.uint16, np.uint32, np.int16, np.int32, np.float32, np.float16,
             np.float64
         ]))
    ]

    for (aug, allowed_dtypes) in augs:
        # print("aug", aug.name)
        # print("allowed_dtypes", allowed_dtypes)
        for images_i in images:
            if images_i.dtype in allowed_dtypes:
                # print("image dt", images_i.dtype)
                images_aug = aug.augment_images(images_i)
                assert images_aug.dtype == images_i.dtype
            else:
                # print("image dt", images_i.dtype, "[SKIPPED]")
                pass
Example #11
    def __getitem__(self, index):
        if self.mode== 'test':
            pic_name = self.pic_list[index].split('\n')[0]
            image_path = DataDir+'test/' + pic_name
            img = jpeg.JPEG(image_path).decode()
            image = cv2.imread(image_path, 1)
            image = cv2.resize(image, (self.image_size[1], self.image_size[0]))
            image = np.transpose(image, (2, 0, 1))
            image = image.astype(np.float32)
            image = image.reshape([-1, self.image_size[0], self.image_size[1]])
            image = image / 255.0
            return torch.FloatTensor(image)
        
        elif self.mode == 'train':
            if (index >= len(self.pic_list)) and self.flip_label:
                origin_index = index - len(self.pic_list)
                pic_name= self.pic_list[origin_index].split(',')[0]
                pic_label = self.pic_list[origin_index].split(',')[1].split('\n')[0]
                pic_label = self.image_label_dict[pic_label]
            else:
                pic_name= self.pic_list[index].split(',')[0]
                pic_label = self.pic_list[index].split(',')[1].split('\n')[0]
                pic_label = self.image_label_dict[pic_label]                

            image_path = DataDir+'train/' + pic_name
            image = cv2.imread(image_path, 1)
            
            if self.augment:
                if random.randint(0,1) == 0:
                    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
                    image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
                if random.randint(0, 1) == 0:
                    image = Perspective_aug(image)
                    image = cv2.resize(image, (self.image_size[1], self.image_size[0]))
                image = aug_image(image)
                
            if self.flip_label:
                if pic_label == 5004:  # if is_new_whale
                    seq = iaa.Sequential([iaa.Fliplr(0.5)])
                    image = seq.augment_image(image)
                    
                elif index >= len(self.pic_list): # 
                    seq = iaa.Sequential([iaa.Fliplr(1.0)])
                    image = seq.augment_image(image)
                    pic_label += 5005

            image = cv2.resize(image, (self.image_size[1], self.image_size[0]))
            image = np.transpose(image, (2, 0, 1))
            image = image.astype(np.float32)
            image = image.reshape([-1, self.image_size[0], self.image_size[1]])
            image = image / 255.0
            return torch.FloatTensor(image), pic_label                
                
        elif self.mode == 'valid':   
            pic_name= self.pic_list[index].split(',')[0]
            pic_label = self.pic_list[index].split(',')[1].split('\n')[0]
            pic_label = self.image_label_dict[pic_label]
            image_path = DataDir+'train/' + pic_name
            image = cv2.imread(image_path, 1)
            
            if self.flip_label:
                seq = iaa.Sequential([iaa.Fliplr(1.0)])
                image = seq.augment_image(image)
                if pic_label != 5004: 
                    pic_label += 5005
                    
            image = cv2.resize(image, (self.image_size[1], self.image_size[0]))
            image = np.transpose(image, (2, 0, 1))
            image = image.astype(np.float32)
            image = image.reshape([-1, self.image_size[0], self.image_size[1]])
            image = image / 255.0
            return torch.FloatTensor(image), pic_label
        
        else:
            raise Exception('mode not available')
Example #12
def test_determinism():
    reseed()

    images = [
        ia.quokka(size=(128, 128)),
        ia.quokka(size=(64, 64)),
        ia.imresize_single_image(skimage.data.astronaut(), (128, 256))
    ]
    images.extend([ia.quokka(size=(16, 16))] * 20)

    keypoints = [
        ia.KeypointsOnImage([
            ia.Keypoint(x=20, y=10),
            ia.Keypoint(x=5, y=5),
            ia.Keypoint(x=10, y=43)
        ],
                            shape=(50, 60, 3))
    ] * 20

    augs = [
        iaa.Sequential([iaa.Fliplr(0.5), iaa.Flipud(0.5)]),
        iaa.SomeOf(1, [iaa.Fliplr(0.5), iaa.Flipud(0.5)]),
        iaa.OneOf([iaa.Fliplr(0.5), iaa.Flipud(0.5)]),
        iaa.Sometimes(0.5, iaa.Fliplr(1.0)),
        iaa.WithColorspace("HSV", children=iaa.Add((-50, 50))),
        # iaa.WithChannels([0], iaa.Add((-50, 50))),
        # iaa.Noop(name="Noop-nochange"),
        # iaa.Lambda(
        #     func_images=lambda images, random_state, parents, hooks: images,
        #     func_keypoints=lambda keypoints_on_images, random_state, parents, hooks: keypoints_on_images,
        #     name="Lambda-nochange"
        # ),
        # iaa.AssertLambda(
        #     func_images=lambda images, random_state, parents, hooks: True,
        #     func_keypoints=lambda keypoints_on_images, random_state, parents, hooks: True,
        #     name="AssertLambda-nochange"
        # ),
        # iaa.AssertShape(
        #     (None, None, None, 3),
        #     check_keypoints=False,
        #     name="AssertShape-nochange"
        # ),
        iaa.Resize((0.5, 0.9)),
        iaa.CropAndPad(px=(-50, 50)),
        iaa.Pad(px=(1, 50)),
        iaa.Crop(px=(1, 50)),
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        iaa.Superpixels(p_replace=(0.25, 1.0), n_segments=(16, 128)),
        # iaa.ChangeColorspace(to_colorspace="GRAY"),
        iaa.Grayscale(alpha=(0.1, 1.0)),
        iaa.GaussianBlur((0.1, 3.0)),
        iaa.AverageBlur((3, 11)),
        iaa.MedianBlur((3, 11)),
        # iaa.Convolve(np.array([[0, 1, 0],
        #                       [1, -4, 1],
        #                       [0, 1, 0]])),
        iaa.Sharpen(alpha=(0.1, 1.0), lightness=(0.8, 1.2)),
        iaa.Emboss(alpha=(0.1, 1.0), strength=(0.8, 1.2)),
        iaa.EdgeDetect(alpha=(0.1, 1.0)),
        iaa.DirectedEdgeDetect(alpha=(0.1, 1.0), direction=(0.0, 1.0)),
        iaa.Add((-50, 50)),
        iaa.AddElementwise((-50, 50)),
        iaa.AdditiveGaussianNoise(scale=(0.1, 1.0)),
        iaa.Multiply((0.6, 1.4)),
        iaa.MultiplyElementwise((0.6, 1.4)),
        iaa.Dropout((0.3, 0.5)),
        iaa.CoarseDropout((0.3, 0.5), size_percent=(0.05, 0.2)),
        iaa.Invert(0.5),
        iaa.ContrastNormalization((0.6, 1.4)),
        iaa.Affine(scale=(0.7, 1.3),
                   translate_percent=(-0.1, 0.1),
                   rotate=(-20, 20),
                   shear=(-20, 20),
                   order=ia.ALL,
                   mode=ia.ALL,
                   cval=(0, 255)),
        iaa.PiecewiseAffine(scale=(0.1, 0.3)),
        iaa.ElasticTransformation(alpha=0.5)
    ]

    augs_affect_geometry = [
        iaa.Sequential([iaa.Fliplr(0.5), iaa.Flipud(0.5)]),
        iaa.SomeOf(1, [iaa.Fliplr(0.5), iaa.Flipud(0.5)]),
        iaa.OneOf([iaa.Fliplr(0.5), iaa.Flipud(0.5)]),
        iaa.Sometimes(0.5, iaa.Fliplr(1.0)),
        iaa.Resize((0.5, 0.9)),
        iaa.CropAndPad(px=(-50, 50)),
        iaa.Pad(px=(1, 50)),
        iaa.Crop(px=(1, 50)),
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        iaa.Affine(scale=(0.7, 1.3),
                   translate_percent=(-0.1, 0.1),
                   rotate=(-20, 20),
                   shear=(-20, 20),
                   order=ia.ALL,
                   mode=ia.ALL,
                   cval=(0, 255)),
        iaa.PiecewiseAffine(scale=(0.1, 0.3)),
        iaa.ElasticTransformation(alpha=(5, 100), sigma=(3, 5))
    ]

    for aug in augs:
        aug_det = aug.to_deterministic()
        images_aug1 = aug_det.augment_images(images)
        images_aug2 = aug_det.augment_images(images)

        aug_det = aug.to_deterministic()
        images_aug3 = aug_det.augment_images(images)
        images_aug4 = aug_det.augment_images(images)

        assert array_equal_lists(images_aug1, images_aug2), \
            "Images (1, 2) expected to be identical for %s" % (aug.name,)

        assert array_equal_lists(images_aug3, images_aug4), \
            "Images (3, 4) expected to be identical for %s" % (aug.name,)

        assert not array_equal_lists(images_aug1, images_aug3), \
            "Images (1, 3) expected to be different for %s" % (aug.name,)

    for aug in augs_affect_geometry:
        aug_det = aug.to_deterministic()
        kps_aug1 = aug_det.augment_keypoints(keypoints)
        kps_aug2 = aug_det.augment_keypoints(keypoints)

        aug_det = aug.to_deterministic()
        kps_aug3 = aug_det.augment_keypoints(keypoints)
        kps_aug4 = aug_det.augment_keypoints(keypoints)

        assert keypoints_equal(kps_aug1, kps_aug2), \
            "Keypoints (1, 2) expected to be identical for %s" % (aug.name,)

        assert keypoints_equal(kps_aug3, kps_aug4), \
            "Keypoints (3, 4) expected to be identical for %s" % (aug.name,)

        assert not keypoints_equal(kps_aug1, kps_aug3), \
            "Keypoints (1, 3) expected to be different for %s" % (aug.name,)
Example #13
def test_unusual_channel_numbers():
    reseed()

    images = [(0, create_random_images((4, 16, 16))),
              (1, create_random_images((4, 16, 16, 1))),
              (2, create_random_images((4, 16, 16, 2))),
              (4, create_random_images((4, 16, 16, 4))),
              (5, create_random_images((4, 16, 16, 5))),
              (10, create_random_images((4, 16, 16, 10))),
              (20, create_random_images((4, 16, 16, 20)))]

    augs = [
        iaa.Add((-5, 5), name="Add"),
        iaa.AddElementwise((-5, 5), name="AddElementwise"),
        iaa.AdditiveGaussianNoise(0.01 * 255, name="AdditiveGaussianNoise"),
        iaa.Multiply((0.95, 1.05), name="Multiply"),
        iaa.Dropout(0.01, name="Dropout"),
        iaa.CoarseDropout(0.01, size_px=6, name="CoarseDropout"),
        iaa.Invert(0.01, per_channel=True, name="Invert"),
        iaa.ContrastNormalization((0.95, 1.05), name="ContrastNormalization"),
        iaa.GaussianBlur(sigma=(0.95, 1.05), name="GaussianBlur"),
        iaa.AverageBlur((3, 5), name="AverageBlur"),
        iaa.MedianBlur((3, 5), name="MedianBlur"),
        # iaa.BilateralBlur((3, 5), name="BilateralBlur"), # works only with 3/RGB channels
        # WithColorspace ?
        # iaa.AddToHueAndSaturation((-5, 5), name="AddToHueAndSaturation"), # works only with 3/RGB channels
        # ChangeColorspace ?
        # iaa.Grayscale((0.0, 0.1), name="Grayscale"), # works only with 3 channels
        # Convolve ?
        iaa.Sharpen((0.0, 0.1), lightness=(1.0, 1.2), name="Sharpen"),
        iaa.Emboss(alpha=(0.0, 0.1), strength=(0.5, 1.5), name="Emboss"),
        iaa.EdgeDetect(alpha=(0.0, 0.1), name="EdgeDetect"),
        iaa.DirectedEdgeDetect(alpha=(0.0, 0.1),
                               direction=0,
                               name="DirectedEdgeDetect"),
        iaa.Fliplr(0.5, name="Fliplr"),
        iaa.Flipud(0.5, name="Flipud"),
        iaa.Affine(translate_px=(-5, 5), name="Affine-translate-px"),
        iaa.Affine(translate_percent=(-0.05, 0.05),
                   name="Affine-translate-percent"),
        iaa.Affine(rotate=(-20, 20), name="Affine-rotate"),
        iaa.Affine(shear=(-20, 20), name="Affine-shear"),
        iaa.Affine(scale=(0.9, 1.1), name="Affine-scale"),
        iaa.PiecewiseAffine(scale=(0.001, 0.005), name="PiecewiseAffine"),
        iaa.PerspectiveTransform(scale=(0.01, 0.10),
                                 name="PerspectiveTransform"),
        iaa.ElasticTransformation(alpha=(0.1, 0.2),
                                  sigma=(0.1, 0.2),
                                  name="ElasticTransformation"),
        iaa.Sequential([iaa.Add((-5, 5)),
                        iaa.AddElementwise((-5, 5))]),
        iaa.SomeOf(1, [iaa.Add(
            (-5, 5)), iaa.AddElementwise((-5, 5))]),
        iaa.OneOf([iaa.Add((-5, 5)),
                   iaa.AddElementwise((-5, 5))]),
        iaa.Sometimes(0.5, iaa.Add((-5, 5)), name="Sometimes"),
        # WithChannels
        iaa.Noop(name="Noop"),
        # Lambda
        # AssertLambda
        # AssertShape
        iaa.Alpha((0.0, 0.1), iaa.Add(10), name="Alpha"),
        iaa.AlphaElementwise((0.0, 0.1), iaa.Add(10), name="AlphaElementwise"),
        iaa.SimplexNoiseAlpha(iaa.Add(10), name="SimplexNoiseAlpha"),
        iaa.FrequencyNoiseAlpha(exponent=(-2, 2),
                                first=iaa.Add(10),
                                name="SimplexNoiseAlpha"),
        iaa.Superpixels(p_replace=0.01, n_segments=64),
        iaa.Resize({
            "height": 4,
            "width": 4
        }, name="Resize"),
        iaa.CropAndPad(px=(-10, 10), name="CropAndPad"),
        iaa.Pad(px=(0, 10), name="Pad"),
        iaa.Crop(px=(0, 10), name="Crop")
    ]

    for aug in augs:
        for (nb_channels, images_c) in images:
            if aug.name != "Resize":
                images_aug = aug.augment_images(images_c)
                assert images_aug.shape == images_c.shape
                image_aug = aug.augment_image(images_c[0])
                assert image_aug.shape == images_c[0].shape
            else:
                images_aug = aug.augment_images(images_c)
                image_aug = aug.augment_image(images_c[0])
                if images_c.ndim == 3:
                    assert images_aug.shape == (4, 4, 4)
                    assert image_aug.shape == (4, 4)
                else:
                    assert images_aug.shape == (4, 4, 4, images_c.shape[3])
                    assert image_aug.shape == (4, 4, images_c.shape[3])
Example #14
def test_keypoint_augmentation():
    reseed()

    keypoints = []
    for y in range(40 // 5):
        for x in range(60 // 5):
            keypoints.append(ia.Keypoint(y=y * 5, x=x * 5))

    keypoints_oi = ia.KeypointsOnImage(keypoints, shape=(40, 60, 3))
    keypoints_oi_empty = ia.KeypointsOnImage([], shape=(40, 60, 3))

    augs = [
        iaa.Add((-5, 5), name="Add"),
        iaa.AddElementwise((-5, 5), name="AddElementwise"),
        iaa.AdditiveGaussianNoise(0.01 * 255, name="AdditiveGaussianNoise"),
        iaa.Multiply((0.95, 1.05), name="Multiply"),
        iaa.Dropout(0.01, name="Dropout"),
        iaa.CoarseDropout(0.01, size_px=6, name="CoarseDropout"),
        iaa.Invert(0.01, per_channel=True, name="Invert"),
        iaa.ContrastNormalization((0.95, 1.05), name="ContrastNormalization"),
        iaa.GaussianBlur(sigma=(0.95, 1.05), name="GaussianBlur"),
        iaa.AverageBlur((3, 5), name="AverageBlur"),
        iaa.MedianBlur((3, 5), name="MedianBlur"),
        # iaa.BilateralBlur((3, 5), name="BilateralBlur"),
        # WithColorspace ?
        # iaa.AddToHueAndSaturation((-5, 5), name="AddToHueAndSaturation"),
        # ChangeColorspace ?
        # Grayscale cannot be tested, input not RGB
        # Convolve ?
        iaa.Sharpen((0.0, 0.1), lightness=(1.0, 1.2), name="Sharpen"),
        iaa.Emboss(alpha=(0.0, 0.1), strength=(0.5, 1.5), name="Emboss"),
        iaa.EdgeDetect(alpha=(0.0, 0.1), name="EdgeDetect"),
        iaa.DirectedEdgeDetect(alpha=(0.0, 0.1),
                               direction=0,
                               name="DirectedEdgeDetect"),
        iaa.Fliplr(0.5, name="Fliplr"),
        iaa.Flipud(0.5, name="Flipud"),
        iaa.Affine(translate_px=(-5, 5), name="Affine-translate-px"),
        iaa.Affine(translate_percent=(-0.05, 0.05),
                   name="Affine-translate-percent"),
        iaa.Affine(rotate=(-20, 20), name="Affine-rotate"),
        iaa.Affine(shear=(-20, 20), name="Affine-shear"),
        iaa.Affine(scale=(0.9, 1.1), name="Affine-scale"),
        iaa.PiecewiseAffine(scale=(0.001, 0.005), name="PiecewiseAffine"),
        # iaa.PerspectiveTransform(scale=(0.01, 0.10), name="PerspectiveTransform"),
        iaa.ElasticTransformation(alpha=(0.1, 0.2),
                                  sigma=(0.1, 0.2),
                                  name="ElasticTransformation"),
        # Sequential
        # SomeOf
        # OneOf
        # Sometimes
        # WithChannels
        # Noop
        # Lambda
        # AssertLambda
        # AssertShape
        iaa.Alpha((0.0, 0.1), iaa.Add(10), name="Alpha"),
        iaa.AlphaElementwise((0.0, 0.1), iaa.Add(10), name="AlphaElementwise"),
        iaa.SimplexNoiseAlpha(iaa.Add(10), name="SimplexNoiseAlpha"),
        iaa.FrequencyNoiseAlpha(exponent=(-2, 2),
                                first=iaa.Add(10),
                                name="SimplexNoiseAlpha"),
        iaa.Superpixels(p_replace=0.01, n_segments=64),
        iaa.Resize(0.5, name="Resize"),
        iaa.CropAndPad(px=(-10, 10), name="CropAndPad"),
        iaa.Pad(px=(0, 10), name="Pad"),
        iaa.Crop(px=(0, 10), name="Crop")
    ]

    for aug in augs:
        dss = []
        for i in range(10):
            aug_det = aug.to_deterministic()

            kp_fully_empty_aug = aug_det.augment_keypoints([])
            assert kp_fully_empty_aug == []

            kp_first_empty_aug = aug_det.augment_keypoints(
                [keypoints_oi_empty])[0]
            assert len(kp_first_empty_aug.keypoints) == 0

            kp_image = keypoints_oi.to_keypoint_image(size=5)
            kp_image_aug = aug_det.augment_image(kp_image)
            kp_image_aug_rev = ia.KeypointsOnImage.from_keypoint_image(
                kp_image_aug,
                if_not_found_coords={
                    "x": -9999,
                    "y": -9999
                },
                nb_channels=1)
            kp_aug = aug_det.augment_keypoints([keypoints_oi])[0]
            ds = []
            assert len(kp_image_aug_rev.keypoints) == len(kp_aug.keypoints),\
                "Lost keypoints for '%s' (%d vs expected %d)" \
                % (aug.name, len(kp_aug.keypoints), len(kp_image_aug_rev.keypoints))
            for kp_pred, kp_pred_img in zip(kp_aug.keypoints,
                                            kp_image_aug_rev.keypoints):
                kp_pred_lost = (kp_pred.x == -9999 and kp_pred.y == -9999)
                kp_pred_img_lost = (kp_pred_img.x == -9999
                                    and kp_pred_img.y == -9999)

                if not kp_pred_lost and not kp_pred_img_lost:
                    d = np.sqrt((kp_pred.x - kp_pred_img.x)**2 +
                                (kp_pred.y - kp_pred_img.y)**2)
                    ds.append(d)
            dss.extend(ds)
            if len(ds) == 0:
                print("[INFO] No valid keypoints found for '%s' "
                      "in test_keypoint_augmentation()" % (str(aug), ))
        assert np.average(dss) < 5.0, \
            "Average distance too high (%.2f, with ds: %s)" \
            % (np.average(dss), str(dss))
def _load_augmentation_aug_all():
    """ Load image augmentation model """
    def sometimes(aug):
        return iaa.Sometimes(0.5, aug)

    return iaa.Sequential(
        [
            # apply the following augmenters to most images
            iaa.Fliplr(0.5),  # horizontally flip 50% of all images
            iaa.Flipud(0.2),  # vertically flip 20% of all images
            # crop images by -5% to 10% of their height/width
            sometimes(
                iaa.CropAndPad(percent=(-0.05, 0.1),
                               pad_mode='constant',
                               pad_cval=(0, 255))),
            sometimes(
                iaa.Affine(
                    # scale images to 80-120% of their size, individually per axis
                    scale={
                        "x": (0.8, 1.2),
                        "y": (0.8, 1.2)
                    },
                    # translate by -20 to +20 percent (per axis)
                    translate_percent={
                        "x": (-0.2, 0.2),
                        "y": (-0.2, 0.2)
                    },
                    rotate=(-45, 45),  # rotate by -45 to +45 degrees
                    shear=(-16, 16),  # shear by -16 to +16 degrees
                    # use nearest neighbour or bilinear interpolation (fast)
                    order=[0, 1],
                    # if mode is constant, use a cval between 0 and 255
                    cval=(0, 255),
                    # use any of scikit-image's warping modes
                    # (see 2nd image from the top for examples)
                    mode='constant')),
            # execute 0 to 5 of the following (less important) augmenters per
            # image don't execute all of them, as that would often be way too
            # strong
            iaa.SomeOf(
                (0, 5),
                [
                    # convert images into their superpixel representation
                    sometimes(
                        iaa.Superpixels(p_replace=(0, 1.0),
                                        n_segments=(20, 200))),
                    iaa.OneOf([
                        # blur images with a sigma between 0 and 3.0
                        iaa.GaussianBlur((0, 3.0)),
                        # blur image using local means with kernel sizes
                        # between 2 and 7
                        iaa.AverageBlur(k=(2, 7)),
                        # blur image using local medians with kernel sizes
                        # between 2 and 7
                        iaa.MedianBlur(k=(3, 11)),
                    ]),
                    iaa.Sharpen(alpha=(0, 1.0),
                                lightness=(0.75, 1.5)),  # sharpen images
                    iaa.Emboss(alpha=(0, 1.0),
                               strength=(0, 2.0)),  # emboss images
                    # search either for all edges or for directed edges,
                    # blend the result with the original image using a blobby mask
                    iaa.SimplexNoiseAlpha(
                        iaa.OneOf([
                            iaa.EdgeDetect(alpha=(0.5, 1.0)),
                            iaa.DirectedEdgeDetect(alpha=(0.5, 1.0),
                                                   direction=(0.0, 1.0)),
                        ])),
                    # add gaussian noise to images
                    iaa.AdditiveGaussianNoise(
                        loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
                    iaa.OneOf([
                        # randomly remove up to 10% of the pixels
                        iaa.Dropout((0.01, 0.1), per_channel=0.5),
                        iaa.CoarseDropout((0.03, 0.15),
                                          size_percent=(0.02, 0.05),
                                          per_channel=0.2),
                    ]),
                    # invert color channels
                    iaa.Invert(0.05, per_channel=True),
                    # change brightness of images (by -10 to 10 of original value)
                    iaa.Add((-10, 10), per_channel=0.5),
                    # change hue and saturation
                    iaa.AddToHueAndSaturation((-20, 20)),
                    # either change the brightness of the whole image (sometimes
                    # per channel) or change the brightness of subareas
                    iaa.OneOf([
                        iaa.Multiply((0.5, 1.5), per_channel=0.5),
                        iaa.FrequencyNoiseAlpha(
                            exponent=(-4, 0),
                            first=iaa.Multiply((0.5, 1.5), per_channel=True),
                            second=iaa.ContrastNormalization((0.5, 2.0)))
                    ]),
                    # improve or worsen the contrast
                    iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5),
                    iaa.Grayscale(alpha=(0.0, 1.0)),
                    # move pixels locally around (with random strengths)
                    sometimes(
                        iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)
                    ),
                    # sometimes move parts of the image around
                    sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))),
                    sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1)))
                ],
                random_order=True)
        ],
        random_order=True)
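A usage sketch (assumed, not part of the function above): for paired inputs such as an image and its mask, to_deterministic() freezes the sampled parameters so successive calls apply the identical transform, as the determinism test in Example #12 shows. Note this applies photometric augmenters (noise, blur, etc.) to the mask as well.

import numpy as np

aug = _load_augmentation_aug_all()
image = np.random.randint(0, 255, (256, 256, 3), dtype=np.uint8)
mask = np.random.randint(0, 255, (256, 256, 3), dtype=np.uint8)

aug_det = aug.to_deterministic()            # freeze random parameters
image_aug = aug_det.augment_image(image)
mask_aug = aug_det.augment_image(mask)      # same transform as image_aug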
Example #16
        def __init__(
                self,
                inputdata,
                inputlabels,
                augs="basic",  #["all","basic","form","valalt","pxlalt","imgalt"]
                num_outs=5,
                og_out=True,
                mode='G',
                em=0,
                intensity=1.0,
                rescaledata=None,
                formatd='NCHW',
                min_augs=0,
                max_augs=5):
            self.mode = mode
            if self.mode.lower() == 'g':
                self.NM = self.rung
            elif self.mode.lower() == 'i':
                self.NM = self.runi()
            elif self.mode.lower() == 'i2':
                self.NM = self.runi2()
            else:
                print(
                    "invalid mode, use 'g' for generator or 'i' for iterator or 'i2'"
                )
                exit()
            self.minaug = min_augs
            self.maxaug = max_augs
            #self.affineopt=["scale","translate_percent","translate_px","rotate","shear"]
            #self.chnlopt=[{"per_channel":True},{"per_channel":False}]
            if len(inputdata.shape) == 4:
                self.D = 4
            elif len(inputdata.shape) == 3:
                self.D = 3
            elif len(inputdata.shape) == 2:
                self.D = 2
            if formatd == "NCHW":
                if self.D == 4:
                    self.inputd = np.transpose(inputdata, [0, 2, 3, 1])
                elif self.D == 3:
                    self.inputd = np.transpose(inputdata, [1, 2, 0])
            else:
                self.inputd = inputdata
            self.Y = inputlabels
            leninten = 9
            if isinstance(intensity, (float, int)):
                itensity = [intensity for _ in range(leninten)]
            else:
                assert len(intensity) == leninten
                itensity = list(intensity)
            self.datashape = np.array(inputdata.shape)  #inputdata[0].shape
            if self.datashape.min() == self.datashape[-1]:
                self.pixls = self.datashape[:-1]
            elif self.datashape.min() == self.datashape[1]:
                self.pixls = np.delete(self.datashape, 1)
            elif self.datashape.shape == (3, ):
                self.pixls = self.datashape[1:]
            else:
                print("error cannot fin the shape of images")
                exit()
            # can use "keep-aspect-ratio" for an arg to have a relative and absolute scale
            #or can also use list for randomization between options
            self.scalevals = (0.5 / (2 * intensity), 1.0)  #use % of image
            self.augs = augs
            self.Pchances = 0.44 * itensity[0]
            self.intrange = (ceil(10 * itensity[1]),
                             ceil(10 + 140 * itensity[1]))
            self.windowrange = (ceil(2 * itensity[2]),
                                ceil((min(self.pixls) / 5) - 8) * itensity[2]
                                )  # mean/median kernel sizes
            self.relatrange = (0.1 * itensity[3], 0.95 * itensity[3]
                               )  # normalisation, invert
            self.bigfloat = (
                0.085 * itensity[4], 1.75 * itensity[4]
            )  # some scale values, multiply, contrast norm, elastic trans (sigma & alpha)
            self.smallfloat = (0.001 * itensity[5], 0.45 * itensity[5]
                               )  # coarse dropout / dropout (p)
            self.addrange = (ceil(-140 * itensity[6]),
                             ceil(140 * itensity[6]))
            self.multrange = (-2.0 * itensity[7], 2.0 * itensity[7])
            self.perchannelsplit = 0.75 * itensity[8]  # used for per_channel on the mult
            self.allaugs = {
                "add":
                IAGA.Add(value=self.addrange, per_channel=0.75 * intensity),
                "scale":
                IAGA.Scale(size=self.scalevals),
                "adde":
                IAGA.AddElementwise(value=self.addrange,
                                    per_channel=0.75 * intensity),
                "addg":
                IAGA.AdditiveGaussianNoise(scale=(0, self.smallfloat[1] * 255),
                                           per_channel=0.75 * intensity),
                "addh":
                IAGA.AddToHueAndSaturation(value=self.addrange,
                                           per_channel=0.75 * intensity),
                "mult":
                IAGA.Multiply(mul=self.bigfloat, per_channel=0.75 * intensity),
                "mule":
                IAGA.MultiplyElementwise(mul=self.bigfloat,
                                         per_channel=0.75 * intensity),
                "drop":
                IAGA.Dropout(p=self.smallfloat, per_channel=0.75 * intensity),
                "cdrop":
                IAGA.CoarseDropout(p=self.smallfloat,
                                   size_px=None,
                                   size_percent=self.smallfloat,
                                   per_channel=True,
                                   min_size=3),
                "inv":
                IAGA.Invert(p=self.Pchances,
                            per_channel=0.75 * intensity,
                            min_value=-255,
                            max_value=255),
                "cont":
                IAGA.ContrastNormalization(alpha=self.bigfloat,
                                           per_channel=0.75 * intensity),
                "aff":
                IAGA.Affine(
                    scale=self.bigfloat,
                    translate_percent={
                        'x': (-40 * intensity, 40 * intensity),
                        'y': (-40 * intensity, 40 * intensity)
                    },
                    translate_px=None,  #moving functions
                    rotate=(-360 * intensity, 360 * intensity),
                    shear=(-360 * intensity, 360 * intensity),
                    order=[0, 1]  #2,3,4,5 may be too much
                    ,
                    cval=0,  #for filling
                    mode=["constant", "edge", "reflect", "symmetric",
                          "wrap"][em],  #filling method
                    deterministic=False,
                    random_state=None),
                "paff":
                IAGA.PiecewiseAffine(
                    scale=(-0.075 * intensity, 0.075 * intensity),
                    nb_rows=(ceil(2 * intensity), ceil(7 * intensity)),
                    nb_cols=(ceil(2 * intensity), ceil(7 * intensity)),
                    order=[0, 1],
                    cval=0,
                    mode=["constant", "edge", "reflect", "symmetric",
                          "wrap"][em],
                    deterministic=False,
                    random_state=None),
                "elas":
                IAGA.ElasticTransformation(alpha=self.bigfloat,
                                           sigma=self.relatrange),
                "noop":
                IAGA.Noop(name="nope"),
                #IAGA.Lambda:{},
                "cropad":
                IAGA.CropAndPad(
                    px=None,
                    percent=(-0.65 * itensity[7], 0.65 * itensity[7]),
                    pad_mode=[
                        "constant", "edge", "reflect", "symmetric", "wrap"
                    ][em],
                    pad_cval=0,
                    keep_size=True,
                    sample_independently=True,
                ),
                "fliplr":
                IAGA.Fliplr(p=self.Pchances),
                "flipud":
                IAGA.Flipud(p=self.Pchances),
                "spixel":
                IAGA.Superpixels(p_replace=self.Pchances,
                                 n_segments=self.intrange),
                #IAGA.ChangeColorspace:,
                "gray":
                IAGA.Grayscale(alpha=self.relatrange),
                "gblur":
                IAGA.GaussianBlur(sigma=self.bigfloat),
                "ablur":
                IAGA.AverageBlur(k=self.windowrange),
                "mblur":
                IAGA.MedianBlur(k=self.windowrange),
                #IAGA.BilateralBlur,
                #IAGA.Convolve:,
                "sharp":
                IAGA.Sharpen(alpha=self.relatrange, lightness=self.bigfloat),
                "embo":
                IAGA.Emboss(alpha=self.relatrange, strength=self.bigfloat),
                "edge":
                IAGA.EdgeDetect(alpha=self.relatrange),
                "dedge":
                IAGA.DirectedEdgeDetect(alpha=self.bigfloat,
                                        direction=(-1.0 * intensity,
                                                   1.0 * intensity)),
                "pert":
                IAGA.PerspectiveTransform(scale=self.smallfloat),
                "salt":
                IAGA.Salt(p=self.Pchances, per_channel=0.75 * intensity),
                #IAGA.CoarseSalt(p=, size_px=None, size_percent=None,per_channel=False, min_size=4),
                #IAGA.CoarsePepper(p=, size_px=None, size_percent=None,"per_channel=False, min_size=4),
                #IAGA.CoarseSaltAndPepper(p=, size_px=None, size_percent=None,per_channel=False, min_size=4),
                "pep":
                IAGA.Pepper(p=self.Pchances, per_channel=0.75 * intensity),
                "salpep":
                IAGA.SaltAndPepper(p=self.Pchances,
                                   per_channel=0.75 * intensity),
                #"alph":IAGA.Alpha(factor=,first=,second=,per_channel=0.75*intensity,),
                #"aplhe":IAGA.AlphaElementwise(factor=,first=,second=,per_channel=0.75*intensity,),
                #IAGA.FrequencyNoiseAlpha(exponent=(-4, 4),first=None, second=None, per_channel=False,size_px_max=(4, 16), upscale_method=None,iterations=(1, 3), aggregation_method=["avg", "max"],sigmoid=0.5, sigmoid_thresh=None,),
                #IAGA.SimplexNoiseAlpha(first=None, second=None, per_channel=False,size_px_max=(2, 16), upscale_method=None,iterations=(1, 3), aggregation_method="max",sigmoid=True, sigmoid_thresh=None,),
            }
            ["all", "basic", "form", "valalt", "pxlalt", "imgalt"]
            self.augs = []
            if (augs == "all") or ("all" in augs):
                self.augs = [
                    "add",
                    "scale",
                    "adde",
                    "addg",
                    "addh",
                    "mult",
                    "mule",
                    "drop",
                    "cdrop",
                    "inv",
                    "cont",
                    "aff",
                    "paff",
                    "elas",
                    "noop",
                    "cropad",
                    "fliplr",
                    "flipud",
                    "spixel",
                    "gray",
                    "gblur",
                    "ablur",
                    "mblur",
                    "sharp",
                    "embo",
                    "edge",
                    "dedge",
                    "pert",
                    "salt",
                    "pep",
                    "salpep",
                ]  #"alph", "aplhe",]
            else:
                if (augs == "basic") or ("basic" in augs):
                    self.augs += [
                        "add", "scale", "addh", "mult", "drop", "cont", "noop"
                    ]
                if (augs == "form") or ("form" in augs):
                    self.augs += [
                        "scale", "aff", "paff", "elas", "noop", "pert"
                    ]
                if (augs == "valalt") or ("valalt" in augs):
                    self.augs += [
                        "mult", "mule", "inv", "fliplr", "flipud", "cropad",
                        "noop"
                    ]
                if (augs == "pxlalt") or ("pxlalt" in augs):
                    self.augs += [
                        "addg", "drop", "salt", "pep", "salpep", "noop"
                    ]
                if (augs == "imgalt") or ("imgalt" in augs):
                    self.augs += [
                        "elas",
                        "noop",
                        "spixel",
                        "gblur",
                        "ablur",
                        "mblur",
                        "sharp",
                        "embo",
                        "edge",
                        "dedge",
                    ]
                if len(augs) == 0:
                    self.augs += [
                        "add",
                        "scale",
                        "addh",
                        "drop",
                        "cont",
                        "aff",
                        "elas",
                        "noop",
                        "cropad",
                        "gray",
                        "ablur",
                        "sharp",
                        "salpep",
                    ]
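            # NOTE: IAGA.SomeOf expects augmenter instances as its children;
            # self.augs currently holds the selected *names*, so they presumably
            # need to be mapped through the augmenter dictionary defined above
            # before being passed to SomeOf.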
            self.AUG = IAGA.SomeOf((self.minaug, self.maxaug),
                                   self.augs,
                                   random_order=True)
            """self.affineopts={"scale":self.biglfoat,
                              "translate_percent":{'x':(-40*intensity,40*intensity),'y':(-40*intensity,40*intensity)}, "translate_px":None,#moving functions
                     "rotate":(-360*intensity,360*intensity), "shear":(0*intensity,360*intensity),
                      "order":[0,1]#2,3,4,5 may be too much
                     , "cval":0,#for filling
                      "mode":"constant",#filling method
                      "deterministic":False,
                       "random_state":None}
            self.pieceaffinev={"scale"=(-0.075*intensity,0.075*intensity), "nb_rows"=(ceil(2*intensity),ceil(7*intensity)), "nb_cols"=(ceil(2*intensity),ceil(7*intensity)),
                                "order"=[0,1], "cval"=0, "mode"="constant",
                      "deterministic"=False, "random_state"=None}"""
            self.num_outs = num_outs - og_out
            self.og_out = og_out
            self.mode = mode
            self.iimg = -1
            self.iout = 0
            try:
                self.len = inputdata.shape[0]
            except AttributeError:
                self.len = len(inputdata)

            def __iter__(self):
                return self

            def __next__(self):
                return (self.NM())

            def next(self):
                return (self.NM())

            def runi(self):
                if self.iimg + 1 >= self.len:
                    raise StopIteration
                self.iimg += 1
                img = self.inputd[self.iimg]
                y = self.Y[self.iimg]
                # one copy per requested output (augmented copies + optional original)
                out = np.broadcast_to(
                    img, (self.num_outs + self.og_out, *img.shape[-3:]))
                out = self.AUG.augment_images(out[self.og_out:])
                if self.og_out:
                    if len(img.shape) == 3:
                        out = np.concatenate([out, np.expand_dims(img, 0)])
                    else:
                        out = np.concatenate([out, img])
                if self.format == "NCHW":
                    out = np.transpose(out, [0, 3, 1, 2])
                return ([(outi, y) for outi in out])

            def runi2(self):
                if self.iimg == self.len:
                    raise StopIteration
                if (self.iout == self.num_outs) or (self.iimg == -1):
                    self.iimg += 1
                    self.iout = 0
                    img = self.inputd[self.iimg]
                    y = self.Y[self.iimg]
                    out = np.broadcast_to(
                        img, (self.num_outs + self.og_out, *img.shape[-3:]))
                    self.out = self.AUG.augment_images(out[self.og_out:])
                    if self.og_out:
                        if len(img.shape) == 3:
                            self.out = np.concatenate(
                                [self.out, np.expand_dims(img, 0)])
                        else:
                            self.out = np.concatenate([self.out, img])
                    if self.format == "NCHW":
                        self.out = np.transpose(self.out, [0, 3, 1, 2])
                    outp = (self.out[self.iout], y)
                else:
                    self.iout += 1
                    outp = (self.out[self.iout], self.Y[self.iimg])
                return (outp)

            def rung(self):
                for ix, img in enumerate(self.inputd):
                    out = np.broadcast_to(
                        img, (self.num_outs + self.og_out, *img.shape[-3:]))
                    out = self.AUG.augment_images(out[self.og_out:])
                    y = self.Y[ix]
                    if self.og_out:
                        if len(img.shape) == 3:
                            out = np.concatenate([out, np.expand_dims(img, 0)])
                        else:
                            out = np.concatenate([out, img])
                    if self.format == "NCHW":
                        out = np.transpose(out, [0, 3, 1, 2])
                    for sout in out:
                        yield (sout, y)
Пример #17
0
def rand_flip(matrix):
    random_flip = iaa.Sometimes(0.5, iaa.Fliplr(1.0))
    return random_flip(images=matrix)
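# Usage sketch for rand_flip (an assumption, not part of the original snippet:
# `matrix` is a uint8 batch of shape (N, H, W, C), which is what imgaug's
# `images=` argument expects):
#
#     import numpy as np
#     batch = np.random.randint(0, 255, size=(4, 64, 64, 3), dtype=np.uint8)
#     flipped = rand_flip(batch)   # roughly half of the images come back mirrored
#     assert flipped.shape == batch.shape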
Пример #18
0
def generate(data_dir,
             batch_size=16,
             image_size=640,
             min_text_size=8,
             shrink_ratio=0.4,
             thresh_min=0.3,
             thresh_max=0.7,
             is_training=True):
    #split = 'train' if is_training else 'test'
    split = 'train'
    with open(osp.join(data_dir, f'{split}_list.txt')) as f:
        image_fnames = f.readlines()
        image_paths = [
            osp.join(data_dir, f'{split}_images', image_fname.strip())
            for image_fname in image_fnames
        ]
        #gt_paths = [osp.join(data_dir, f'{split}_gts', image_fname.strip()).replace('.jpg', '.txt') for image_fname in image_fnames]
        gt_paths = [
            osp.join(
                data_dir, f'{split}_gts', '/'.join([
                    osp.split(image_fname.strip())[0],
                    'gt_' + osp.split(image_fname.strip())[-1]
                ]).replace('.jpg', '.txt').lstrip('/'))
            for image_fname in image_fnames
        ]
    transform_aug = iaa.Sequential([
        iaa.Fliplr(0.5),
        iaa.Affine(rotate=(-10, 10)),
        iaa.Resize((0.5, 3.0))
    ])
    dataset_size = len(image_paths)
    indices = np.arange(dataset_size)
    if is_training:
        np.random.shuffle(indices)
    current_idx = 0
    b = 0
    while True:
        if current_idx >= dataset_size:
            if is_training:
                np.random.shuffle(indices)
            current_idx = 0
        if b == 0:
            # Init batch arrays
            batch_images = np.zeros([batch_size, image_size, image_size, 3],
                                    dtype=np.float32)
            batch_gts = np.zeros([batch_size, image_size, image_size],
                                 dtype=np.float32)
            batch_masks = np.zeros([batch_size, image_size, image_size],
                                   dtype=np.float32)
            batch_thresh_maps = np.zeros([batch_size, image_size, image_size],
                                         dtype=np.float32)
            batch_thresh_masks = np.zeros([batch_size, image_size, image_size],
                                          dtype=np.float32)
            batch_loss = np.zeros([
                batch_size,
            ], dtype=np.float32)
        i = indices[current_idx]
        image_path = image_paths[i]
        #anns = all_anns[i]
        anns = load_anns(gt_paths[i])
        image = cv2.imread(image_path)
        # show_polys(image.copy(), anns, 'before_aug')
        if is_training:
            transform_aug = transform_aug.to_deterministic()
            image, anns = transform(transform_aug, image, anns)
            image, anns = crop(image, anns)
        image, anns = resize(image_size, image, anns)
        # show_polys(image.copy(), anns, 'after_aug')
        # cv2.waitKey(0)
        anns = [ann for ann in anns if Polygon(ann['poly']).is_valid]
        gt = np.zeros((image_size, image_size), dtype=np.float32)
        mask = np.ones((image_size, image_size), dtype=np.float32)
        thresh_map = np.zeros((image_size, image_size), dtype=np.float32)
        thresh_mask = np.zeros((image_size, image_size), dtype=np.float32)
        for ann in anns:
            poly = np.array(ann['poly'])
            height = max(poly[:, 1]) - min(poly[:, 1])
            width = max(poly[:, 0]) - min(poly[:, 0])
            polygon = Polygon(poly)
            # generate gt and mask
            #if polygon.area < 1 or min(height, width) < min_text_size or ann['text'] == '###':
            if polygon.area < 1 or min(height, width) < min_text_size:
                cv2.fillPoly(mask, poly.astype(np.int32)[np.newaxis, :, :], 0)
                continue
            else:
                distance = polygon.area * (
                    1 - np.power(shrink_ratio, 2)) / polygon.length
                subject = [tuple(l) for l in ann['poly']]
                padding = pyclipper.PyclipperOffset()
                padding.AddPath(subject, pyclipper.JT_ROUND,
                                pyclipper.ET_CLOSEDPOLYGON)
                shrinked = padding.Execute(-distance)
                if len(shrinked) == 0:
                    cv2.fillPoly(mask,
                                 poly.astype(np.int32)[np.newaxis, :, :], 0)
                    continue
                else:
                    shrinked = np.array(shrinked[0]).reshape(-1, 2)
                    if shrinked.shape[0] > 2 and Polygon(shrinked).is_valid:
                        cv2.fillPoly(gt, [shrinked.astype(np.int32)], 1)
                    else:
                        cv2.fillPoly(mask,
                                     poly.astype(np.int32)[np.newaxis, :, :],
                                     0)
                        continue
            # generate thresh map and thresh mask
            draw_thresh_map(ann['poly'],
                            thresh_map,
                            thresh_mask,
                            shrink_ratio=shrink_ratio)
        thresh_map = thresh_map * (thresh_max - thresh_min) + thresh_min

        image = image.astype(np.float32)
        image[..., 0] -= mean[0]
        image[..., 1] -= mean[1]
        image[..., 2] -= mean[2]
        batch_images[b] = image
        batch_gts[b] = gt
        batch_masks[b] = mask
        batch_thresh_maps[b] = thresh_map
        batch_thresh_masks[b] = thresh_mask

        b += 1
        current_idx += 1
        if b == batch_size:
            inputs = [
                batch_images, batch_gts, batch_masks, batch_thresh_maps,
                batch_thresh_masks
            ]
            outputs = batch_loss
            yield inputs, outputs
            b = 0
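# Usage sketch (assumptions: a hypothetical dataset directory laid out as
# <data_dir>/train_list.txt, <data_dir>/train_images/ and <data_dir>/train_gts/,
# plus a Keras-style model whose inputs match the batch built above):
#
#     train_gen = generate('datasets/my_text_dataset', batch_size=8, image_size=640)
#     inputs, outputs = next(train_gen)   # inputs = [images, gts, masks,
#                                         #           thresh_maps, thresh_masks]
#     # model.fit(train_gen, steps_per_epoch=100, epochs=10)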
Пример #19
0
def _create_augment_pipeline():

    # augmentors by https://github.com/aleju/imgaug
    sometimes = lambda aug: iaa.Sometimes(0.2, aug)

    # Define our sequence of augmentation steps that will be applied to every image
    # All augmenters with per_channel=0.5 will sample one value _per image_
    # in 50% of all cases. In all other cases they will sample new values
    # _per channel_.
    aug_pipe = iaa.Sequential(
        [
            # apply the following augmenters to most images
            iaa.Fliplr(0.5),  # horizontally flip 50% of all images
            #iaa.Flipud(0.2),  # vertically flip 20% of all images

            # execute 0 to 5 of the following (less important) augmenters per image
            # don't execute all of them, as that would often be way too strong
            iaa.SomeOf(
                (0, 2),
                [
                    iaa.Affine(scale={
                        "x": (0.8, 1.2),
                        "y": (0.8, 1.2)
                    }, ),
                    iaa.Affine(translate_percent={
                        "x": (-0.2, 0.2),
                        "y": (-0.2, 0.2)
                    }, ),
                    iaa.Affine(rotate=(-15, 15), ),
                    iaa.Affine(shear=(-15, 15)),
                    iaa.OneOf([
                        iaa.GaussianBlur(
                            (0, 3.0
                             )),  # blur images with a sigma between 0 and 3.0
                        iaa.AverageBlur(k=(2, 7)),
                        # blur image using local means (kernel sizes between 2 and 7)
                        iaa.MedianBlur(k=(3, 11)),
                        # blur image using local medians (kernel sizes between 2 and 7)
                    ]),
                    iaa.Sharpen(alpha=(0, 1.0),
                                lightness=(0.75, 1.5)),  # sharpen images
                    iaa.AdditiveGaussianNoise(
                        loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
                    # add gaussian noise
                    iaa.OneOf([
                        iaa.Dropout(
                            (0.01, 0.1), per_channel=0.5
                        ),  # randomly remove up to 10% of the pixels
                        iaa.CoarseDropout((0.03, 0.15),
                                          size_percent=(0.02, 0.05),
                                          per_channel=0.2),
                    ]),
                    iaa.Add((-10, 10),
                            per_channel=0.5),  # change brightness of images
                    iaa.Multiply(
                        (0.5, 1.5),
                        per_channel=0.5),  # change brightness of images
                    iaa.ContrastNormalization(
                        (0.5, 2.0),
                        per_channel=0.5),  # improve or worsen the contrast
                ],
                random_order=True)
        ],
        random_order=True)

    return aug_pipe
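# Usage sketch (assumption: `images` is a list or an (N, H, W, C) uint8 array):
#
#     aug_pipe = _create_augment_pipeline()
#     images_aug = aug_pipe(images=images)            # recent imgaug releases
#     # images_aug = aug_pipe.augment_images(images)  # older imgaug releases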
Пример #20
0
def draw_per_augmenter_images(imgName):
    print("[draw_per_augmenter_images] Loading image...")
    image = misc.imresize(ndimage.imread(imgName), (227, 227))

    #keypoints = [ia.Keypoint(x=43, y=43), ia.Keypoint(x=78, y=40), ia.Keypoint(x=64, y=73)] # left eye, right eye, mouth
    #keypoints = [ia.Keypoint(x=34, y=15), ia.Keypoint(x=85, y=13), ia.Keypoint(x=63, y=73)] # left ear, right ear, mouth
    #keypoints = [ia.KeypointsOnImage(keypoints, shape=image.shape)]

    print("[draw_per_augmenter_images] Initializing...")
    rows_augmenters = [
        #("Noop", [("", iaa.Noop()) for _ in sm.xrange(5)]),
        #("Crop", [iaa.Crop(px=vals) for vals in [(2, 4), (4, 8), (6, 16), (8, 32), (10, 64)]]),
        ("Crop\n(top, right,\nbottom, left)", [(str(vals), iaa.Crop(px=vals)) for vals in [(2, 0, 0, 0), (0, 8, 8, 0), (4, 0, 16, 4), (8, 0, 0, 32), (32, 64, 0, 0)]]),
        ("Fliplr", [(str(p), iaa.Fliplr(p)) for p in [0, 0, 1, 1, 1]]),
        ("Flipud", [(str(p), iaa.Flipud(p)) for p in [0, 0, 1, 1, 1]]),
        ("Superpixels\np_replace=1", [("n_segments=%d" % (n_segments,), iaa.Superpixels(p_replace=1.0, n_segments=n_segments)) for n_segments in [25, 50, 75, 100, 125]]),
        ("Superpixels\nn_segments=100", [("p_replace=%.2f" % (p_replace,), iaa.Superpixels(p_replace=p_replace, n_segments=100)) for p_replace in [0, 0.25, 0.5, 0.75, 1.0]]),
        ("Invert", [("p=%d" % (p,), iaa.Invert(p=p)) for p in [0, 0, 1, 1, 1]]),
        ("Invert\n(per_channel)", [("p=%.2f" % (p,), iaa.Invert(p=p, per_channel=True)) for p in [0.5, 0.5, 0.5, 0.5, 0.5]]),
        ("Add", [("value=%d" % (val,), iaa.Add(val)) for val in [-45, -25, 0, 25, 45]]),
        ("Add\n(per channel)", [("value=(%d, %d)" % (vals[0], vals[1],), iaa.Add(vals, per_channel=True)) for vals in [(-55, -35), (-35, -15), (-10, 10), (15, 35), (35, 55)]]),
        ("Multiply", [("value=%.2f" % (val,), iaa.Multiply(val)) for val in [0.25, 0.5, 1.0, 1.25, 1.5]]),
        ("Multiply\n(per channel)", [("value=(%.2f, %.2f)" % (vals[0], vals[1],), iaa.Multiply(vals, per_channel=True)) for vals in [(0.15, 0.35), (0.4, 0.6), (0.9, 1.1), (1.15, 1.35), (1.4, 1.6)]]),
        ("GaussianBlur", [("sigma=%.2f" % (sigma,), iaa.GaussianBlur(sigma=sigma)) for sigma in [0.25, 0.50, 1.0, 2.0, 4.0]]),
        ("AverageBlur", [("k=%d" % (k,), iaa.AverageBlur(k=k)) for k in [1, 3, 5, 7, 9]]),
        ("MedianBlur", [("k=%d" % (k,), iaa.MedianBlur(k=k)) for k in [1, 3, 5, 7, 9]]),
        ("Sharpen\n(alpha=1)", [("lightness=%.2f" % (lightness,), iaa.Sharpen(alpha=1, lightness=lightness)) for lightness in [0, 0.5, 1.0, 1.5, 2.0]]),
        ("Emboss\n(alpha=1)", [("strength=%.2f" % (strength,), iaa.Emboss(alpha=1, strength=strength)) for strength in [0, 0.5, 1.0, 1.5, 2.0]]),
        ("EdgeDetect", [("alpha=%.2f" % (alpha,), iaa.EdgeDetect(alpha=alpha)) for alpha in [0.0, 0.25, 0.5, 0.75, 1.0]]),
        ("DirectedEdgeDetect\n(alpha=1)", [("direction=%.2f" % (direction,), iaa.DirectedEdgeDetect(alpha=1, direction=direction)) for direction in [0.0, 1*(360/5)/360, 2*(360/5)/360, 3*(360/5)/360, 4*(360/5)/360]]),
        ("AdditiveGaussianNoise", [("scale=%.2f*255" % (scale,), iaa.AdditiveGaussianNoise(scale=scale * 255)) for scale in [0.025, 0.05, 0.1, 0.2, 0.3]]),
        ("AdditiveGaussianNoise\n(per channel)", [("scale=%.2f*255" % (scale,), iaa.AdditiveGaussianNoise(scale=scale * 255, per_channel=True)) for scale in [0.025, 0.05, 0.1, 0.2, 0.3]]),
        ("Dropout", [("p=%.2f" % (p,), iaa.Dropout(p=p)) for p in [0.025, 0.05, 0.1, 0.2, 0.4]]),
        ("Dropout\n(per channel)", [("p=%.2f" % (p,), iaa.Dropout(p=p, per_channel=True)) for p in [0.025, 0.05, 0.1, 0.2, 0.4]]),
        ("CoarseDropout\n(p=0.2)", [("size_percent=%.2f" % (size_percent,), iaa.CoarseDropout(p=0.2, size_percent=size_percent, min_size=2)) for size_percent in [0.3, 0.2, 0.1, 0.05, 0.02]]),
        ("CoarseDropout\n(p=0.2, per channel)", [("size_percent=%.2f" % (size_percent,), iaa.CoarseDropout(p=0.2, size_percent=size_percent, per_channel=True, min_size=2)) for size_percent in [0.3, 0.2, 0.1, 0.05, 0.02]]),
        ("ContrastNormalization", [("alpha=%.1f" % (alpha,), iaa.ContrastNormalization(alpha=alpha)) for alpha in [0.5, 0.75, 1.0, 1.25, 1.50]]),
        ("ContrastNormalization\n(per channel)", [("alpha=(%.2f, %.2f)" % (alphas[0], alphas[1],), iaa.ContrastNormalization(alpha=alphas, per_channel=True)) for alphas in [(0.4, 0.6), (0.65, 0.85), (0.9, 1.1), (1.15, 1.35), (1.4, 1.6)]]),
        ("Grayscale", [("alpha=%.1f" % (alpha,), iaa.Grayscale(alpha=alpha)) for alpha in [0.0, 0.25, 0.5, 0.75, 1.0]]),
        ("PiecewiseAffine", [("scale=%.3f" % (scale,), iaa.PiecewiseAffine(scale=scale)) for scale in [0.015, 0.03, 0.045, 0.06, 0.075]]),
        ("Affine: Scale", [("%.1fx" % (scale,), iaa.Affine(scale=scale)) for scale in [0.1, 0.5, 1.0, 1.5, 1.9]]),
        ("Affine: Translate", [("x=%d y=%d" % (x, y), iaa.Affine(translate_px={"x": x, "y": y})) for x, y in [(-32, -16), (-16, -32), (-16, -8), (16, 8), (16, 32)]]),
        ("Affine: Rotate", [("%d deg" % (rotate,), iaa.Affine(rotate=rotate)) for rotate in [-90, -45, 0, 45, 90]]),
        ("Affine: Shear", [("%d deg" % (shear,), iaa.Affine(shear=shear)) for shear in [-45, -25, 0, 25, 45]]),
        ("Affine: Modes", [(mode, iaa.Affine(translate_px=-32, mode=mode)) for mode in ["constant", "edge", "symmetric", "reflect", "wrap"]]),
        ("Affine: cval", [("%d" % (int(cval*255),), iaa.Affine(translate_px=-32, cval=int(cval*255), mode="constant")) for cval in [0.0, 0.25, 0.5, 0.75, 1.0]]),
        (
            "Affine: all", [
                (
                    "",
                    iaa.Affine(
                        scale={"x": (0.5, 1.5), "y": (0.5, 1.5)},
                        translate_px={"x": (-32, 32), "y": (-32, 32)},
                        rotate=(-45, 45),
                        shear=(-32, 32),
                        mode=ia.ALL,
                        cval=(0.0, 1.0)
                    )
                )
                for _ in sm.xrange(5)
            ]
        ),
        ("ElasticTransformation\n(sigma=0.2)", [("alpha=%.1f" % (alpha,), iaa.ElasticTransformation(alpha=alpha, sigma=0.2)) for alpha in [0.1, 0.5, 1.0, 3.0, 9.0]])
    ]

    print("[draw_per_augmenter_images] Augmenting...")
    i = 0
    for (row_name, augmenters) in rows_augmenters:
        for img_title, augmenter in augmenters:
            aug_det = augmenter.to_deterministic()
            image_aug = aug_det.augment_image(image)
            #print(outputDir + os.path.splitext(os.path.basename(imgName))[0] + "_%04d.jpg"% (i,))
            misc.imsave(outputDir + os.path.splitext(os.path.basename(imgName))[0] + "_%04d.jpg" % (i,), image_aug)
            i += 1
Пример #21
0
    def processor(self):
        return iaa.Fliplr(1)

    def __init__(self, images, config, shuffle=True, jitter=True, norm=None):
        self.generator = None

        self.images = images
        self.config = config

        self.shuffle = shuffle
        self.jitter = jitter
        self.norm = norm

        self.anchors = [BoundBox(0, 0, config['ANCHORS'][2*i], config['ANCHORS'][2*i+1])\
                for i in range(int(len(config['ANCHORS'])//2))]

        ### augmentors by https://github.com/aleju/imgaug
        sometimes = lambda aug: iaa.Sometimes(0.33, aug)

        # Define our sequence of augmentation steps that will be applied to every image
        # All augmenters with per_channel=0.5 will sample one value _per image_
        # in 50% of all cases. In all other cases they will sample new values
        # _per channel_.
        self.aug_pipe = iaa.Sequential(
            [
                # apply the following augmenters to most images
                iaa.Fliplr(0.5),  # horizontally flip 50% of all images
                #iaa.Flipud(0.2), # vertically flip 20% of all images
                sometimes(iaa.Crop(percent=(0, 0.1))
                          ),  # crop images by 0-10% of their height/width
                sometimes(
                    iaa.Affine(
                        scale={
                            "x": (0.85, 1.15),
                            "y": (0.85, 1.15)
                        },  # scale images to 80-120% of their size, individually per axis
                        translate_percent={
                            "x": (-0.15, 0.15),
                            "y": (-0.15, 0.15)
                        },  # translate by -20 to +20 percent (per axis)
                        rotate=(-5, 5),  # rotate by -45 to +45 degrees
                        #shear=(-5, 5), # shear by -16 to +16 degrees
                        #order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
                        #cval=(0, 255), # if mode is constant, use a cval between 0 and 255
                        #mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)
                    )),
                # execute 0 to 5 of the following (less important) augmenters per image
                # don't execute all of them, as that would often be way too strong
                iaa.SomeOf(
                    (0, 5),
                    [
                        #sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))), # convert images into their superpixel representation
                        iaa.OneOf([
                            iaa.GaussianBlur(
                                (0, 2.0)
                            ),  # blur images with a sigma between 0 and 3.0
                            # iaa.AverageBlur(k=(2, 5)), # blur image using local means with kernel sizes between 2 and 7
                            # iaa.MedianBlur(k=(3, 6)), # blur image using local medians with kernel sizes between 2 and 7
                        ]),
                        iaa.Sharpen(alpha=(0, 1.0),
                                    lightness=(0.75, 1.5)),  # sharpen images
                        #iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)), # emboss images
                        # search either for all edges or for directed edges
                        #sometimes(iaa.OneOf([
                        #    iaa.EdgeDetect(alpha=(0, 0.7)),
                        #    iaa.DirectedEdgeDetect(alpha=(0, 0.7), direction=(0.0, 1.0)),
                        #])),
                        iaa.AdditiveGaussianNoise(
                            loc=0, scale=(0.0, 0.04 * 255),
                            per_channel=0.5),  # add gaussian noise to images
                        iaa.OneOf([
                            iaa.Dropout(
                                (0.01, 0.05), per_channel=0.5
                            ),  # randomly remove up to 5% of the pixels
                            #iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),
                        ]),
                        iaa.Invert(0.05,
                                   per_channel=True),  # invert color channels
                        iaa.Add(
                            (-10, 10), per_channel=0.5
                        ),  # change brightness of images (by -10 to 10 of original value)
                        iaa.Multiply(
                            (0.75, 1.25), per_channel=0.5
                        ),  # change brightness of images (50-150% of original value)
                        iaa.ContrastNormalization(
                            (0.7, 1.3),
                            per_channel=0.5),  # improve or worsen the contrast
                        #iaa.Grayscale(alpha=(0.0, 1.0)),
                        #sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)), # move pixels locally around (with random strengths)
                        #sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))) # sometimes move parts of the image around
                    ],
                    random_order=True)
            ],
            random_order=True)

        if shuffle: np.random.shuffle(self.images)
Пример #23
0
    def createSequence(self):

        seq_affine = []
        seq_non_affine = []
        seq_noise = []

        def sometimes_affine(aug): return iaa.Sometimes(
            self.default_param['affine_probability']/100, aug)

        def sometimes_non_affine(aug): return iaa.Sometimes(
            self.default_param['non_affine_probability']/100, aug)

        def sometimes_noise(aug): return iaa.Sometimes(
            self.default_param['noise_probability']/100, aug)

        affine = {k: self.default_param[k] for k in self.default_param.keys() if (
            ('affine' in k) and (not 'non' in k) and self.default_param[k])}
        if affine != {}:
            affine_seq = {}
            for a in affine:
                if a == 'affine_flip_horizontal':
                    seq_affine.append(sometimes_affine(
                        iaa.Fliplr(affine[a]/100)))
                elif a == 'affine_flip_vertical':
                    seq_affine.append(sometimes_affine(
                        iaa.Flipud(affine[a]/100)))
                elif a == 'affine_rotate':
                    affine_seq['rotate'] = (-affine[a], affine[a])
                elif a == 'affine_scale':
                    affine_seq['scale'] = {
                        "x": (1, 1+affine[a]/100), "y": (1, 1+affine[a]/100)}
                elif a == 'affine_shear':
                    affine_seq['shear'] = (-affine[a], affine[a])
            if affine_seq != {}:
                affine_seq['mode'] = 'reflect'
                seq_affine.append(sometimes_affine(iaa.Affine(**affine_seq)))

        non_affine = {k: self.default_param[k] for k in self.default_param.keys() if (
            ('non_affine' in k) and self.default_param[k])}
        if non_affine != {}:
            for a in non_affine:
                if a == 'non_affine_brightness':
                    seq_non_affine.append(sometimes_non_affine(iaa.Multiply(
                        (1-non_affine[a]/100, 1+non_affine[a]/100), per_channel=True)))

                elif a == 'non_affine_contrast':
                    seq_non_affine.append(sometimes_non_affine(
                        iaa.ContrastNormalization((1-non_affine[a]/100, 1+non_affine[a]/100))))
                elif a == 'non_affine_emboss':
                    seq_non_affine.append(sometimes_non_affine(iaa.Emboss(
                        alpha=(non_affine[a]/200, non_affine[a]/100), strength=(0, non_affine[a]/50))))
                elif a == 'non_affine_grayscale':
                    seq_non_affine.append(sometimes_non_affine(
                        iaa.Grayscale(alpha=(non_affine[a]/200, non_affine[a]/100))))
                elif a == 'non_affine_saturation':
                    seq_non_affine.append(sometimes_non_affine(
                        iaa.AddToHueAndSaturation((-non_affine[a], non_affine[a]))))
                elif a == 'non_affine_shrpen':
                    seq_non_affine.append(sometimes_non_affine(iaa.Sharpen(alpha=(
                        non_affine[a]/200, non_affine[a]/100), lightness=(1-non_affine[a]/100, 1+non_affine[a]/100))))

        noise = {k: self.default_param[k] for k in self.default_param.keys() if (
            ('noise' in k) and self.default_param[k])}
        if noise != {}:
            for a in noise:
                if a == 'noise_blur':
                    seq_noise.append(sometimes_noise(
                        iaa.GaussianBlur((noise[a]/2, noise[a]))))
                elif a == 'noise_dropout':
                    seq_noise.append(sometimes_noise(
                        iaa.Dropout((noise[a]/200, noise[a]/100))))
                elif a == 'noise_frequency':
                    seq_noise.append(sometimes_noise(
                        iaa.FrequencyNoiseAlpha(exponent=(-noise[a], noise[a]))))
                elif a == 'noise_noise':
                    seq_noise.append(sometimes_noise(
                        iaa.AdditiveGaussianNoise(
                            scale=(255*noise[a]/200, 255*noise[a]/100))))
                elif a == 'noise_salt_and_pepper':
                    seq_noise.append(sometimes_noise(
                        iaa.SaltAndPepper(p=(noise[a]/200, noise[a]/100))))

        return {'affine': seq_affine, 'non_affine': seq_non_affine, 'noise': seq_noise}
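# Usage sketch (assumption: `obj` is an instance of the class that defines
# createSequence; the three returned lists can be chained into one pipeline):
#
#     seqs = obj.createSequence()
#     pipeline = iaa.Sequential(
#         seqs['affine'] + seqs['non_affine'] + seqs['noise'], random_order=True)
#     images_aug = pipeline(images=images)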
def tf_softmax(x):
    return tf.nn.softmax(x)


def unpickle(file):
    import pickle
    with open(file, 'rb') as fo:
        dict = pickle.load(fo, encoding='bytes')
    return dict


# Data Augmentation
seq = iaa.Sequential(
    [
        iaa.Fliplr(0.5),  # horizontal flips
        iaa.Crop(percent=(0, 0.1)),  # random crops
        # Small gaussian blur with random sigma between 0 and 0.5.
        # But we only blur about 50% of all images.
        iaa.Sometimes(0.5, iaa.GaussianBlur(sigma=(0, 0.5))),
        # Strengthen or weaken the contrast in each image.
        iaa.ContrastNormalization((0.75, 1.5)),
        # Add gaussian noise.
        # For 50% of all images, we sample the noise once per pixel.
        # For the other 50% of all images, we sample the noise per pixel AND
        # channel. This can change the color (not only brightness) of the
        # pixels.
        iaa.AdditiveGaussianNoise(
            loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
        # Make some images brighter and some darker.
        # In 20% of all cases, we sample the multiplier once per channel,
Пример #25
0
    def __init__(self, p=.5, key_source='image', key_target=None):
        super(FlipLR, self).__init__(key_source=key_source,
                                     key_target=key_target)

        self.sequence = iaa.Fliplr(p=p)
Пример #26
0
import numpy as np
import imgaug as ia
from imgaug import augmenters as iaa

ia.seed(1)
np.random.seed(1)

augmentations = [{
    'common': True,
    'seq': iaa.Flipud(1)
}, {
    'common': True,
    'seq': iaa.Fliplr(1)
}, {
    'common': False,
    'seq': iaa.GaussianBlur(sigma=(0.0, 3.0))
}, {
    'common': False,
    'seq': iaa.Sharpen(alpha=(0.0, 1.0), lightness=(0.75, 2.0))
}]


def image_augmentation(img, mask):
    """
    Batch wise image augmentation. Randomly augments the images with flip, sharpen and/or gaussian blur
    :param img:
    :param mask:
    :return:
    """
    for aug in augmentations:
        if not aug['common']:
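# The snippet is cut off above. A minimal sketch of how such a loop is often
# finished (an assumption, not the original code): augmenters marked 'common'
# are run with a shared deterministic state on both image and mask, the
# remaining ones only on the image.
#
#     for aug in augmentations:
#         if aug['common']:
#             det = aug['seq'].to_deterministic()
#             img = det.augment_image(img)
#             mask = det.augment_image(mask)
#         else:
#             img = aug['seq'].augment_image(img)
#     return img, mask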
Пример #27
0
 
def check_dir_or_create(dir):
    if not os.path.exists(dir):
        os.makedirs(dir)
 
# Sometimes(0.5, ...) applies the given augmenter in 50% of all cases,
# e.g. Sometimes(0.5, GaussianBlur(0.3)) would blur roughly every second image.
sometimes = lambda aug: iaa.Sometimes(0.5, aug)
 
# Define our sequence of augmentation steps that will be applied to every image
# All augmenters with per_channel=0.5 will sample one value _per image_
# in 50% of all cases. In all other cases they will sample new values
# _per channel_.
 
augmenters = [
    iaa.Fliplr(0.25), # left/right flips
    iaa.Flipud(0.25), # up/down flips
    iaa.Crop(percent=(0, 0.1)), # random crops
    iaa.Affine(
        scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
        translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
        rotate=(-25, 25),
        shear=(-8, 8)
    )
]
 
seq = iaa.Sequential(augmenters, random_order=True)
 
files = [y for x in os.walk(INPUT)
         for y in glob(os.path.join(x[0], '*')) if os.path.isfile(y)]
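# Sketch of a typical continuation (assumptions: OUTPUT is a hypothetical
# destination directory and imageio is available; `seq` is applied to every
# file collected above and the result is written under its original name):
#
#     import imageio
#     check_dir_or_create(OUTPUT)
#     for f in files:
#         img = imageio.imread(f)
#         img_aug = seq(image=img)
#         imageio.imwrite(os.path.join(OUTPUT, os.path.basename(f)), img_aug)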
Пример #28
0
 def __init__(self):
     self.imgaug_transform = iaa.Fliplr(p=1)
Пример #29
0
     X_train_aug_accu.append(get_accumEdged(X_aug))
     y_train_aug.append(y_aug)
 
     # Save the augmented pictures to the exact folder
     name = X_train_names[img_count].replace(".bmp", "")
     name = name + "_" + str(img_count) + "_rot_" + str(rotate_angle) + ".bmp"
     X_train_aug_names.append(name)
     cv2.imwrite(Aug_train_data_folder + name, X_aug)
     
     # Draw Troughs and Save Image
     X_aug = draw_troughs(X_aug, y_aug)
     cv2.imwrite(Aug_data_with_points_folder + name + "_" + str(img_count) +
                 "_rot_" + str(rotate_angle) + ".bmp", X_aug)
     
     # Augmentation_2 (Fliplr + Rotation)
     images_lr , keypoints_lr = iaa.Fliplr(1.0)(images = image, 
                                          keypoints = point)
     X_aug , y_aug = iaa.Affine(rotate = rotate_angle)(images = images_lr, 
                                                keypoints = keypoints_lr)
     # Again Reshaping to before format
     X_aug = X_aug.reshape((image.shape[1], image.shape[2], image.shape[3]))
     y_aug = np.array(y_aug).reshape((2, 2))
     # Append samples after augmentation
     X_train_aug_bmp.append(X_aug)
     X_train_aug_gray.append(cv2.cvtColor(X_aug, cv2.COLOR_BGR2GRAY))
     X_train_aug_accu.append(get_accumEdged(X_aug))
     y_train_aug.append(y_aug)
 
     # Save the augmented pictures to the exact folder
     name = X_train_names[img_count].replace(".bmp", "")
     name = name + "_" + str(img_count) + "_flrot_" + str(rotate_angle) + ".bmp"
     X_train_aug_names.append(name)
Пример #30
0
    def make_synthetic_image(self, file_base, added_images):

        img_base = cv2.imread(join(self.bg_path, file_base))

        # Store mask labels for later training, i.e. stores the corresponding object label for every mask channel
        save_name = ''
        mask_labels = []
        mask_overlayed = np.zeros((img_base.shape[0], img_base.shape[1]),
                                  dtype=np.uint8)[:, :, None]

        # Get rid of placeholder channel entry
        mask_overlayed = mask_overlayed[:, :, 1:]

        if len(mask_overlayed.shape) < 3:
            mask_overlayed = mask_overlayed[:, :, np.newaxis]
        img_overlayed = copy(img_base)

        # Perturb background
        scale = np.random.uniform(0.6, 1.1)
        img_perturbed = copy(img_overlayed)
        # clip before casting back to uint8 so bright pixels saturate instead of wrapping
        img_perturbed = np.clip(img_perturbed * scale, 0, 255).astype(np.uint8)
        img_overlayed = img_perturbed

        for i, file_added in enumerate(added_images):
            # Read image to be added on top

            print("Added image: ", file_added)

            img_added = cv2.imread(
                join(self.mask_root_path,
                     file_added.split('_')[0], file_added))

            if file_base.endswith('.jpg'):
                mask_added = np.load(
                    join(self.mask_root_path,
                         file_added.split('_')[0],
                         file_added.split('.jpg')[0] + '.npy'))
            else:
                mask_added = np.load(
                    join(self.mask_root_path,
                         file_added.split('_')[0],
                         file_added.split('.png')[0] + '.npy'))

            mask_labels.append(self.mask_root_path.split('/')[-1])

            # Mask image
            # img_added_masked = img_added * mask_added[:,:,np.newaxis]

            aff = iaa.Sequential([
                iaa.Fliplr(0.5),  # horizontally flip 50% of all images
                iaa.Flipud(0.2),  # vertically flip 20% of all images
                iaa.Affine(scale={
                    "x": (0.7, 1.3),
                    "y": (0.7, 1.3)
                },
                           translate_percent={
                               "x": (-0.2, 0.2),
                               "y": (-0.2, 0.2)
                           },
                           rotate=(-90, 90),
                           shear=(-15, 15))
            ])
            # Affine transform
            aff_det = aff.to_deterministic()
            img_added = aff_det.augment_image(img_added)
            mask_added = aff_det.augment_image(mask_added)

            # Static Transform
            st = iaa.SomeOf(
                (0, 3),
                [
                    iaa.AdditiveGaussianNoise(
                        loc=0, scale=(0.0, 0.02 * 255), per_channel=0.5),

                    # Change brightness of images (50-150% of original value).
                    iaa.Multiply((0.8, 1.2), per_channel=0.0),

                    # Improve or worsen the contrast of images.
                    iaa.ContrastNormalization((0.8, 1.2), per_channel=0.5),
                    iaa.OneOf([
                        iaa.Dropout((0.005, 0.01), per_channel=0.0),
                        iaa.CoarseDropout((0.01, 0.05),
                                          size_percent=(0.01, 0.05),
                                          per_channel=0.0),
                    ]),
                    # Add a value of -10 to 10 to each pixel.
                    iaa.Add((-10, 30), per_channel=0.0),
                ])
            img_added = st.augment_image(img_added)
            img_added_masked = img_added * mask_added[:, :, np.newaxis]

            # # Augment masks
            # img_added_masked, mask_added = self.translate_mask(img_added_masked, mask_added, \
            #                                                 row_shift=randint(-MAX_SHIFT_ROW, MAX_SHIFT_ROW), \
            #                                                 col_shift=randint(-MAX_SHIFT_COL, MAX_SHIFT_COL))
            # img_added_masked, mask_added = self.rotate_mask(img_added_masked, mask_added, \
            #                                                 angle=randint(-180,180,1), center=None, \
            #                                                 scale=np.random.uniform(0.9, 1.1))
            # img_added_masked, mask_added = self.perturb_intensity(img_added_masked, mask_added, scale=np.random.uniform(0.9,1.1))

            # Apply masks
            img_overlayed[np.where(
                mask_added == 1)] = img_added_masked[np.where(mask_added == 1)]
            for j in range(mask_overlayed.shape[-1]):
                mask_overlayed[:, :, j] *= np.logical_not(mask_added)
            mask_overlayed = np.concatenate([mask_overlayed, \
                                    mask_added[:, :, np.newaxis]], axis=2)
            # Save image and mask
            if i > 0: connector = '_'
            else: connector = ''

            if file_base.endswith('.jpg'):
                save_name += connector + file_added.split('.jpg')[0]
            else:
                save_name += connector + file_added.split('.png')[0]

        # if same overlay combo exists, assign unique suffix
        save_name += '-0'
        while os.path.exists(join(self.save_path, save_name + '.jpg')):
            index = int(save_name.rsplit('-', 1)[-1])
            save_name = save_name.rsplit('-', 1)[0] + '-' + str(index + 1)

        return img_overlayed, mask_overlayed, mask_labels, save_name