def test_aug():
    im = cv2.imread('/media/wsl/SB@data/dataset/瓶盖分类/dataset/单字检测/聪明盖/0_0.jpg')
    bboxes = read_labelme('/media/wsl/SB@data/dataset/瓶盖分类/dataset/单字检测/聪明盖/0_0.json')
    poly_on_img = PolygonsOnImage([Polygon(bbox) for bbox in bboxes], shape=im.shape)
    
    st = lambda aug: iaa.Sometimes(1, aug)
    seq = iaa.Sequential([
        #st(iaa.Pad(percent=((0, 0.2), (0, 0.2), (0, 0.2), (0, 0.2)), keep_size=False)),
        #
        # # st(iaa.Crop(percent=([0.0, 0.3], [0.00, 0.1], [0.0, 0.3], [0.0, 0.1]), keep_size=False)),
        st(iaa.Affine(scale=(0.9, 1.0), rotate=(-45, 45), shear=(-5, 5), translate_px={"x": (-16, 16), "y": (-10, 10)},
                      fit_output=True)),
        st(iaa.Add(value=(-10, 10), per_channel=True)),
        # st(iaa.PerspectiveTransform((0,0.1),fit_output=True)),
        # st(iaa.MultiplyAndAddToBrightness(mul=(0.6, 1.5), add=(0, 30))),
        st(iaa.ChangeColorTemperature(kelvin=(3000, 9100))),
        st(iaa.Sharpen(0, 0.1)),
        st(iaa.GaussianBlur((0, 1))),
        st(iaa.AddToHueAndSaturation((-2, 2))),
        st(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.2), per_channel=True)),  # add gaussian noise to images
    ])
    for i in range(10):
        imgs_aug, poly_on_img_aug = seq(image=im, polygons=poly_on_img)

        res = poly_on_img_aug.draw_on_image(imgs_aug)
        a = poly_on_img_aug.to_xy_array()
        print(a.shape)
        cv2.imshow('a', res)
        cv2.waitKey(0)
    def __init__(self):
        st = lambda aug: iaa.Sometimes(0.5, aug)
        self.seq = iaa.Sequential([
            st(iaa.Pad(percent=((0, 0.2), (0, 0.2), (0, 0.2), (0, 0.2)), keep_size=False)),
            # st(iaa.Crop(percent=([0.0, 0.1], [0.00, 0.1], [0.0, 0.1], [0.0, 0.1]), keep_size=False)),
            st(iaa.Affine(scale=(0.9, 1.0), rotate=(-30, 30), shear=(-5, 5),
                          translate_px={"x": (-30, 30), "y": (-10, 10)},
                          fit_output=True)),
            # st(iaa.PerspectiveTransform((0, 0.1), fit_output=True)),
            # st(iaa.MultiplyAndAddToBrightness(mul=(0.6, 1.5), add=(0, 30))),
            st(iaa.ChangeColorTemperature(kelvin=(3000, 9100))),
            st(iaa.LinearContrast((0.75, 1.5))),
            st(iaa.GaussianBlur((0, 0.2))),
            # st(iaa.PerspectiveTransform(scale=0.05)),
            st(iaa.AddToHueAndSaturation((-20, 20))),
            st(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 16),
                                         per_channel=True)),  # add gaussian noise to images
            # st(iaa.Dropout((0.0, 0.1), per_channel=0.5)),  # randomly remove up to 10% of the pixels
            # change brightness of images (by -40 to 40 of original value)
            st(iaa.Add((-40, 40), per_channel=True)),
            # change brightness of images (50-150% of original value)
            st(iaa.Multiply((0.5, 1.5), per_channel=True)),
        ])
def main():
    image = ia.quokka_square()
    images_aug = []
    for kelvin in np.linspace(1000, 10000, 64):
        images_aug.append(iaa.ChangeColorTemperature(kelvin)(image=image))

    ia.imshow(ia.draw_grid(images_aug))
Example #4
def chapter_augmenters_changecolortemperature():
    fn_start = "color/changecolortemperature"

    image = imageio.imread(
        os.path.join(INPUT_IMAGES_DIR, "Pahalgam_Valley.jpg"))
    image = ia.imresize_single_image(image, 0.2)

    aug = iaa.ChangeColorTemperature((1100, 10000))
    run_and_save_augseq(fn_start + ".jpg", aug, [image] * 8, cols=3, rows=2)
Example #5
def get_preview(images, augmentationList):
    """
    Accepts a list of images and augmentationList as input.
    Provides a list of augmented images in that order as ouptut.
    """
    augmented = []
    for image in images:
        for augmentation in augmentationList:
            aug_id = augmentation['id']
            params = augmentation['params']
            if (aug_id == 1):
                image = iaa.SaltAndPepper(p=params[0],
                                          per_channel=params[1])(image=image)
            elif (aug_id == 2):
                image = iaa.imgcorruptlike.GaussianNoise(
                    severity=(params[0], params[1]))(image=image)
            elif (aug_id == 3):
                image = iaa.Rain(speed=(params[0], params[1]),
                                 drop_size=(params[2], params[3]))(image=image)
            elif (aug_id == 4):
                image = iaa.imgcorruptlike.Fog(
                    severity=(params[0], params[1]))(image=image)
            elif (aug_id == 5):
                image = iaa.imgcorruptlike.Snow(
                    severity=(params[0], params[1]))(image=image)
            elif (aug_id == 6):
                image = iaa.imgcorruptlike.Spatter(
                    severity=(params[0], params[1]))(image=image)
            elif (aug_id == 7):
                image = iaa.BlendAlphaSimplexNoise(
                    iaa.EdgeDetect(1))(image=image)
            elif (aug_id == 8):
                image = iaa.Rotate(rotate=(params[0], params[1]))(image=image)
            elif (aug_id == 9):
                image = iaa.Affine()(image=image)  #to be implemented
            elif (aug_id == 10):
                image = iaa.MotionBlur(k=params[0],
                                       angle=(params[1],
                                              params[2]))(image=image)
            elif (aug_id == 11):
                image = iaa.imgcorruptlike.ZoomBlur(
                    severity=(params[0], params[1]))(image=image)
            elif (aug_id == 12):
                image = iaa.AddToBrightness()(image=image)  #to be implemented
            elif (aug_id == 13):
                image = iaa.ChangeColorTemperature(
                    kelvin=(params[0], params[1]))(image=image)
            elif (aug_id == 14):
                image = iaa.SigmoidContrast()(image=image)  #to be implemented
            elif (aug_id == 15):
                image = iaa.Cutout(nb_iterations=(params[0], params[1]),
                                   size=params[2],
                                   squared=params[3])(image=image)
            else:
                print("Not implemented")
        augmented.append(image)
    return augmented
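# A minimal usage sketch for get_preview (hypothetical values): the inputs are
# assumed to be uint8 NumPy arrays, and the id/params pairs follow the dispatch
# table above; "example.jpg" is a placeholder path.
import imageio

images = [imageio.imread("example.jpg")]
augmentationList = [
    {'id': 1, 'params': [0.05, True]},    # SaltAndPepper(p=0.05, per_channel=True)
    {'id': 13, 'params': [3000, 9000]},   # ChangeColorTemperature(kelvin=(3000, 9000))
]
previews = get_preview(images, augmentationList)  # one augmented image per input, same order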
def generate_change_color_temperature():
    ia.seed(1)

    image = imageio.imread(
        os.path.join(INPUT_IMAGES_DIR, "Pahalgam_Valley.jpg"))
    image = ia.imresize_single_image(image, 0.12)
    images_aug = [image]
    for kelvin in np.linspace(1000, 5000, 7):
        images_aug.append(iaa.ChangeColorTemperature(kelvin)(image=image))
    _save("changecolortemperature.jpg", ia.draw_grid(images_aug,
                                                     cols=8,
                                                     rows=1))
Example #7
	def Random(self, n=5):
		seq = iaa.Sequential([
	    iaa.AddToHueAndSaturation((-20,30),per_channel=True),
	    iaa.GaussianBlur(sigma=2),
	    iaa.AdditiveGaussianNoise(scale=0.2*255),
	    iaa.geometric.Affine(rotate=(-25, 25)),
	    iaa.Crop(px=(0, 100)),
	    iaa.Grayscale(alpha=(0.0, 1.0)),
	    iaa.ChangeColorTemperature((1100, 10000))
		])
		images_aug=[seq(image=self.image) for _ in range(n)]
		print(len(images_aug))
		return images_aug, n 
Example #8
def img_aug(image, coordinate_lists, h, w):
    BoundingBox_list = []
    for coordinate in coordinate_lists:
        BoundingBox_list.append(
            BoundingBox(x1=coordinate[0], y1=coordinate[1], x2=coordinate[2],
                        y2=coordinate[3]))

    bbs = BoundingBoxesOnImage(BoundingBox_list, (h, w))
    seq = iaa.Sequential([

        iaa.ChangeColorTemperature((3000, 40000)),
        iaa.Affine(translate_px={"x": (-150, 150), "y": (-150, 150)}, scale=(0.5, 1.7), rotate=(-15, 15))

    ])
    image_aug, bbs_aug = seq(image=image, bounding_boxes=bbs)
    return image_aug, bbs_aug
Example #9
    def np_func(img, ann):
        # convert to imgaug keypoints
        keypoints = annotation_to_keypoints(img, ann)
        img = img.astype(np.uint8)

        p = 0.1
        seq = iaa.Sequential([
            iaa.Sometimes(p, iaa.Sequential([iaa.ShearY((-20, 20))])),
            iaa.Sometimes(p, iaa.ChangeColorTemperature((3500, 8000))),
            iaa.Sometimes(p, iaa.AddToBrightness((-15, 15))),
            iaa.Sometimes(p, iaa.AdditiveGaussianNoise(scale=(0, 0.03 * 255), per_channel=True))
        ])

        img, keypoints = seq(image=img, keypoints=keypoints)

        # convert from imgaug keypoints
        ann = keypoints_to_annotation(img, ann, keypoints)

        return img.astype(np.float32), ann
Example #10
def _load_augmentation_aug_non_geometric():
    return iaa.Sequential([
        iaa.Sometimes(0.3, iaa.Multiply((0.5, 1.5), per_channel=0.5)),
        iaa.Sometimes(0.2, iaa.JpegCompression(compression=(70, 99))),
        iaa.Sometimes(0.2, iaa.GaussianBlur(sigma=(0, 3.0))),
        iaa.Sometimes(0.2, iaa.MotionBlur(k=15, angle=[-45, 45])),
        iaa.Sometimes(0.2, iaa.MultiplyHue((0.5, 1.5))),
        iaa.Sometimes(0.2, iaa.MultiplySaturation((0.5, 1.5))),
        iaa.Sometimes(
            0.34, iaa.MultiplyHueAndSaturation((0.5, 1.5), per_channel=True)),
        iaa.Sometimes(0.34, iaa.Grayscale(alpha=(0.0, 1.0))),
        iaa.Sometimes(0.2, iaa.ChangeColorTemperature((1100, 10000))),
        iaa.Sometimes(0.1, iaa.GammaContrast((0.5, 2.0))),
        iaa.Sometimes(0.2, iaa.SigmoidContrast(gain=(3, 10),
                                               cutoff=(0.4, 0.6))),
        iaa.Sometimes(0.1, iaa.CLAHE()),
        iaa.Sometimes(0.1, iaa.HistogramEqualization()),
        iaa.Sometimes(0.2, iaa.LinearContrast((0.5, 2.0), per_channel=0.5)),
        iaa.Sometimes(0.1, iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)))
    ])
Example #11
def augment_image(im):

    arr = np.asarray(im, dtype=np.uint8)
    blurer = iaa.GaussianBlur(1 + iap.Uniform(0.1, 3.0))

    seq = iaa.Sequential([
        #iaa.Crop(px=(1, 16), keep_size=False),
        iaa.Fliplr(0.5),
        iaa.GaussianBlur(sigma=(0, 1.0)),
        iaa.ChangeColorTemperature((1100, 10000))
    ])

    #aug = iaa.Emboss(alpha=(0.0, 1.0), strength=(0.5, 1.5))
    #aug = iaa.EdgeDetect(alpha=(0.0, 1.0))
    #aug = iaa.ChangeColorTemperature((1100, 10000))

    aug_arr = seq(images=[arr])[0]
    im2 = Image.fromarray(aug_arr)
    #im2.show()
    #im.show()
    #Image.fromarray(np.hstack((np.array(im),np.array(im2)))).show()
    return im2
Example #12
    def __init__(self, data, dtype):
        super().__init__(data, dtype)
        self.augmentationseq = iaa.Sequential([
            # iaa.PerspectiveTransform(scale=(0.01, 0.08), keep_size=True),
            iaa.Rotate(rotate=(0, 359)),
            iaa.ChangeColorTemperature((3000, 10000)),
            iaa.Affine(scale=(0.8, 1.2)),
            iaa.GammaContrast((0.8, 1.2)),
            iaa.Resize({
                'width': WIDTH,
                'height': HEIGHT
            },
                       interpolation=imgaug.ALL)
        ])

        self.resizeseq = iaa.Sequential([
            iaa.Resize({
                'width': WIDTH,
                'height': HEIGHT
            },
                       interpolation=imgaug.ALL)
        ])
Example #13
def init_augmenter(img_mode="color"):
    """Initializes the augmenters used in the training dataset
    :param config: the config object that contains all the 
    """
    ia.seed(10)

    if img_mode == 'color':
        return iaa.Sequential([
            sometimes(iaa.Fliplr()),
            iaa.MultiplyBrightness((0.6, 1.4)),
            # TODO: try no ChangeColor or Brightness
            sometimes(iaa.ChangeColorTemperature((5000, 7000))),
            iaa.Crop(percent=(
                (0, 0.50),
                (0, 0.50),
                (0, 0.50),
                (0, 0.50)
            ))
            # sometimes(iaa.OneOf([
            #     iaa.Cutout(nb_iterations=(1, 4), size=0.2,
            #                squared=False, cval=(0, 255), fill_mode="constant"),
            #     iaa.Cutout(nb_iterations=(1, 4), size=0.2, squared=False, cval=(
            #         0, 255), fill_mode="gaussian", fill_per_channel=True),
            #     iaa.AdditiveGaussianNoise(scale=(0, 0.1*255))
            # ]))
        ])
    else:
        return iaa.Sequential([
            sometimes(iaa.Fliplr()),
            iaa.Crop(percent=(
                (0, 0.40),
                (0, 0.40),
                (0, 0.40),
                (0, 0.40)
            ))
        ])
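# A short, hedged sketch of applying the pipeline returned by init_augmenter;
# it assumes `frames` is a list of uint8 RGB arrays and that the `sometimes`
# helper used above wraps augmenters in iaa.Sometimes.
seq = init_augmenter(img_mode="color")
frames_aug = seq(images=frames)    # augment a whole batch in one call
single_aug = seq(image=frames[0])  # or augment a single image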
Example #14
import imgaug.augmenters as iaa
import random

import numpy as np
import cv2
from PIL import Image

aug_transform = iaa.SomeOf((0, None), [
    iaa.OneOf([
        iaa.MultiplyAndAddToBrightness(mul=(0.3, 1.6), add=(-50, 50)),
        iaa.MultiplyHueAndSaturation((0.5, 1.5), per_channel=True),
        iaa.ChannelShuffle(0.5),
        iaa.RemoveSaturation(),
        iaa.Grayscale(alpha=(0.0, 1.0)),
        iaa.ChangeColorTemperature((1100, 35000)),
    ]),
    iaa.OneOf([
        iaa.MedianBlur(k=(3, 7)),
        iaa.BilateralBlur(
            d=(3, 10), sigma_color=(10, 250), sigma_space=(10, 250)),
        iaa.MotionBlur(k=(3, 9), angle=[-45, 45]),
        iaa.MeanShiftBlur(spatial_radius=(5.0, 10.0),
                          color_radius=(5.0, 10.0)),
        iaa.AllChannelsCLAHE(clip_limit=(1, 10)),
        iaa.AllChannelsHistogramEqualization(),
        iaa.GammaContrast((0.5, 1.5), per_channel=True),
        iaa.GammaContrast((0.5, 1.5)),
        iaa.SigmoidContrast(gain=(3, 10), cutoff=(0.4, 0.6), per_channel=True),
        iaa.SigmoidContrast(gain=(3, 10), cutoff=(0.4, 0.6)),
        iaa.HistogramEqualization(),
        iaa.Sharpen(alpha=0.5)
def train_crossvalidation(**kwargs):

    check_args(kwargs, ['data_dir', 'saving_dir'])

    data_dir = kwargs.get('data_dir', None)
    saving_dir = kwargs.get('saving_dir', None)

    experiment_name = kwargs.get('experiment_name', 'model_training')
    learning_rate = kwargs.get('learning_rate', 0.00001)
    epochs = kwargs.get('epochs', 100)
    patience = kwargs.get('patience', 10)
    resnet_size = kwargs.get('resnet_size',
                             34)  # allowed sizes: 18,34,50,101,152
    num_workers = kwargs.get('num_workers', 4)
    batch_size = kwargs.get('batch_size', 64)
    weighted_loss = kwargs.get('weighted_loss', True)
    sample = kwargs.get('sample', 1.0)

    img_aug = kwargs.get('img_aug', 0.5)

    if not img_aug:
        img_aug_seq = None
    else:
        sometimes = lambda augmentation: iaa.Sometimes(img_aug, augmentation)
        img_aug_seq = iaa.Sequential([
            iaa.Fliplr(img_aug),
            sometimes(iaa.ChangeColorTemperature((1100, 10000))),
            sometimes(
                iaa.OneOf([
                    iaa.GaussianBlur(sigma=(0, 2.0)),
                    iaa.AddToHueAndSaturation((-10, 10))
                ]))
        ])

    create_dir(saving_dir)
    experiment_path = os.path.join(saving_dir, experiment_name)
    create_dir(experiment_path)

    #load data
    df = path2DataFrame(data_dir)

    df = df.groupby('category').apply(lambda x: x.sample(frac=sample))

    #remove after testing
    #df = df.sample(frac=0.1)

    X = df['file_path'].values
    y = df['category'].values
    y_encoded, class_index_dict = label_encoding(y)
    n_classes = len(class_index_dict)

    # GPU or CPU
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(device)

    #set loss
    if weighted_loss:
        weights = get_class_weights(y_encoded, class_index_dict)
        loss_function = nn.CrossEntropyLoss(
            reduction='sum', weight=torch.FloatTensor(weights).to(device))
    else:
        loss_function = nn.CrossEntropyLoss(reduction='sum')

    data_splits = make_train_val_test_splits(
        X,
        y_encoded,
        img_aug=img_aug_seq,
        num_workers=num_workers,
        batch_size=batch_size,
        splits=10,
    )

    #crossvalidation
    for i, split in enumerate(data_splits):
        print(f'split {i}\n')
        split_path = os.path.join(experiment_path, f'split_{i}')
        create_dir(split_path)

        trainloader = split['trainloader']
        valloader = split['valloader']
        testloader = split['testloader']

        print('size train: {}'.format(len(trainloader.dataset)))
        print('size val: {}'.format(len(valloader.dataset)))
        print('size test: {}'.format(len(testloader.dataset)))

        save_class_index(class_index_dict, split_path)

        #initialize model
        model = ResNet(resnet_size, n_classes).to(device)
        #set optimizer
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)

        model, history = train(model=model,
                               loss_function=loss_function,
                               optimizer=optimizer,
                               trainloader=trainloader,
                               valloader=valloader,
                               device=device,
                               saving_dir=split_path,
                               epochs=epochs,
                               patience=patience)

        # evaluate on test data
        metrics_dict, ground_truth_list, predictions_list, test_images_list = evaluate(
            model=model,
            dataloader=testloader,
            device=device,
            loss_function=loss_function)

        #generate heatmaps using GradCAM for some test images
        save_XAI(model=model,
                 test_images_list=test_images_list,
                 ground_truth_list=ground_truth_list,
                 predictions_list=predictions_list,
                 saving_dir=split_path,
                 device=device,
                 class_index_dict=class_index_dict)

        #print test metrics
        for k, v in metrics_dict.items():
            print(f'{k}_test: {v}')

        #save training history
        experiment = Experiment()
        experiment.add('class_index_dict', class_index_dict)
        experiment.add('model', model)
        experiment.add('resnet_size', resnet_size)

        for k, v in metrics_dict.items():
            experiment.add(f'{k}_test', v)

        for k, v in history.items():
            experiment.add(k, v)

        experiment.save(split_path)

    return
Example #16
                                                        alpha_face=0.0,
                                                        color_lines=(255, 0,
                                                                     0))
                #image_with_polygon, alpha_points=0, alpha_face=0.5, color_lines=(255, 0, 0))
            if not os.path.exists(vis_dir):
                os.makedirs(vis_dir)
            save_path = os.path.join(
                vis_dir,
                self.images_name[i] + '_polygon' + '.' + self.images_format[i])
            cv2.imwrite(save_path, image_with_polygon)


'''
changes the color temperature of images to a random value between 1100 and 10000 Kelvin
'''
aug_colorTemperature = iaa.ChangeColorTemperature((1100, 10000))
'''
Convert each image to a colorspace with a brightness-related channel, extract
that channel, multiply it by a factor between 0.5 and 1.5, add a value between
-30 and 30 and convert back to the original colorspace
'''
aug_brightness = iaa.MultiplyAndAddToBrightness(mul=(0.5, 1.5), add=(-30, 30))
'''
Multiply the hue and saturation of images by random values, or sample random
values from the discrete uniform range [-50..50] and add them to the hue and
saturation
'''
aug_hueSaturation = [
    iaa.MultiplyHue((0.5, 1.5)),
    iaa.MultiplySaturation((0.5, 1.5)),
    iaa.AddToHue((-50, 50)),
    iaa.AddToSaturation((-50, 50))
]
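# A brief sketch (assumption: `image` is a uint8 RGB array loaded elsewhere)
# showing one way to chain the augmenters defined above.
combined = iaa.Sequential([
    aug_colorTemperature,
    aug_brightness,
    iaa.OneOf(aug_hueSaturation),  # pick one of the hue/saturation augmenters per image
], random_order=True)
image_aug = combined(image=image)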
Example #17
seq = iaa.Sequential([
    iaa.Fliplr(p=0),  # p=0: no flip, so this is effectively the original image
    iaa.Crop(px=(22, 45), keep_size=False),  # crop images from each side by 22 to 45px (randomly chosen)
    iaa.Fliplr(1),  # horizontally flip all images
    iaa.GaussianBlur(sigma=(5, 7.0)),  # blur images with a sigma of 5 to 7.0
    iaa.ImpulseNoise(p=(0.6,1)),
    iaa.EdgeDetect(alpha=(0.9,1)),
    #iaa.AddToBrightness(add=(100,124)),
    iaa.Canny(alpha=(0.8,0.9)),
    iaa.Grayscale(alpha=1.00),
    iaa.ChannelShuffle(p=1),
    iaa.geometric.Affine( scale=2,rotate=22, backend='cv2'),
    iaa.Cartoon(blur_ksize=(11,13)),
    iaa.CenterCropToAspectRatio(1),
    iaa.CenterCropToFixedSize(100,100),
    iaa.ChangeColorTemperature(kelvin=(2222,3333)),
    #iaa.segmentation(),
    iaa.CLAHE(clip_limit=(4,8)),
    iaa.Rotate(rotate=(-30,90))
])

plt.figure(figsize=(12,12))

for idx,Augmentor in enumerate(seq):
    # print(1)
    ax=plt.subplot(4,4,idx+1)
    ax.axis('off')
    plt.tight_layout()
    title=str(Augmentor).split('(')[0]
    #plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, 
    #        hspace = 0.2, wspace = 0)
Example #18
                                                 start_at=(0.8, 1),
                                                 end_at=(0, 0.2)),
            iaa.BlendAlphaVerticalLinearGradient(iaa.Add(
                iap.Normal(iap.Choice([-40, 40]), 10)),
                                                 start_at=(0, 0.2),
                                                 end_at=(0.8, 1)),
            # Change contrast
            iaa.SigmoidContrast(gain=(3, 9), cutoff=(0.3, 0.7)),
            iaa.LinearContrast((0.4, 1.6)),
            iaa.GammaContrast((0.5, 2)),
            iaa.LogContrast(gain=(0.5, 1.5)),
            iaa.pillike.Autocontrast((2, 5)),
            iaa.Emboss(alpha=(0.1, 1), strength=(1, 2)),
        ]),
    ),
    iaa.Sometimes(0.25, iaa.ChangeColorTemperature((4000, 12000))),
    # Blur + compress
    iaa.Sometimes(
        0.15,
        iaa.OneOf([
            iaa.imgcorruptlike.Pixelate(severity=(1, 2)),
            # iaa.imgcorruptlike.JpegCompression(severity=(1, 3)),
            iaa.UniformColorQuantization(n_colors=(20, 100)),
            iaa.AveragePooling((2, 3)),
            iaa.GaussianBlur((1, 2)),
            iaa.MotionBlur((3, 5)),
            iaa.imgcorruptlike.DefocusBlur(severity=(1)),
            iaa.pillike.EnhanceSharpness(),
            iaa.pillike.FilterSmoothMore(),
        ])),
])
Example #19
# plt.figure(figsize=(10,10))
# plt.imshow(im1, cmap='gray')
# plt.axis('off')
# plt.show()
# exit()

aug_seq = iaa.Sequential([
    iaa.Resize({
        "height": 72 * 5,
        "width": 128 * 5
    }),
    iaa.MultiplyAndAddToBrightness(mul=(0.9, 1.1), add=0),
    # For the Gaussian noise filter, scale 0 is the normal (unmodified) image; the range is 0~15, so the normal case does not sit at the midpoint.
    iaa.SigmoidContrast(gain=5, cutoff=0.35),
    iaa.GammaContrast((0.9, 1.1), per_channel=True),
    iaa.ChangeColorTemperature(kelvin=(8000, 12000)),
    iaa.MultiplyHueAndSaturation((0.8, 1.1), per_channel=True),
    iaa.Resize({
        "height": 720,
        "width": 1280
    }),
    iaa.Sometimes(
        0.5,
        iaa.Sequential([
            iaa.MotionBlur(k=15, angle=[-90, 90]),
            iaa.AdditiveGaussianNoise(scale=(0, 15)),
        ]))
])

# dft = cv2.dft(np.float32(gray_img),flags = cv2.DFT_COMPLEX_OUTPUT)
# dft_shift = np.fft.fftshift(dft)
Example #20
    def __getitem__(self, idx):
        img = self.file['data'][idx]
        img = cv2.resize(src=img, dsize=(self.img_w, self.img_h))
        gt_map = self.file['gt'][idx]
        gt_map = cv2.resize(src=gt_map,
                            dsize=(self.img_w, self.img_h)).astype('int32')
        gt_map = cv2.flip(
            gt_map, 0)  # flip gt because the original DeepLab produces flipped output

        if self.mode == 'test':
            # normalized_img = img / img.max()
            # norm_img = np.divide(normalized_img - self.mean_mat, self.std_mat)
            norm_img = (2 * (img / 255)) - 1

            # gt_one_hot = (np.arange(self.num_of_classes) == gt_map[..., None]).astype(int)
            return {
                'image': np.moveaxis(norm_img, -1, 0),
                # 'gt': np.moveaxis(gt_one_hot, -1, 0),
                'gt_reg_map': gt_map,
                'idx': idx
            }

        else:
            segmap = SegmentationMapsOnImage(gt_map, shape=img.shape)
            seq = iaa.Sequential([
                #   iaa.CropToFixedSize(width=self.crop_w, height=self.crop_h),
                sometimes(
                    iaa.OneOf([
                        iaa.GaussianBlur((0, 3.0)),
                        iaa.AverageBlur(k=(2, 4)),
                    ])),
                sometimes(iaa.ChangeColorTemperature((1100, 10000))),
                sometimes(
                    iaa.OneOf([
                        iaa.Add((-10, 10), per_channel=0.5),
                        iaa.Multiply((0.85, 1.15), per_channel=0.5)
                    ])),
                # iaa.OneOf([iaa.GammaContrast((0.5, 2.0)),
                #            iaa.GammaContrast((0.5, 2.0), per_channel=True),
                #            iaa.LogContrast(gain=(0.6, 1.4))]),
                # sometimes(iaa.JpegCompression(compression=(80, 99))),
                #   sometimes(iaa.CropAndPad(percent=(-0.5, 0.5))),
                sometimes(iaa.Grayscale(alpha=(0.0, 1.0)))
            ])
            # pdb.set_trace()
            augmented_img, augmented_map = seq(image=img.astype('uint8'),
                                               segmentation_maps=segmap)
            augmented_map = augmented_map.arr[..., 0]
            # flip - lr:
            if np.random.uniform() > 0.5:
                augmented_img = cv2.flip(augmented_img, 1)
                augmented_map = cv2.flip(augmented_map, 1)
            # normalized_img = np.true_divide(augmented_img, augmented_img.max())
            # norm_img = np.true_divide(normalized_img - self.mean_mat, self.std_mat)
            norm_img = (2 * (augmented_img / 255)) - 1
            # gt_one_hot = (np.arange(self.num_of_classes) == augmented_map[..., None]).astype(int)
            # pdb.set_trace()
            sample = {
                'image': np.moveaxis(norm_img, -1, 0),
                #   'gt': np.moveaxis(gt_one_hot, -1, 0),
                'gt_reg_map': augmented_map,
                'idx': idx
            }
            return sample
Example #21
        transformed_image = transform(image=image)['image']

    elif augmentation == 'to_sepia':
        transform = ToSepia(always_apply=True)
        transformed_image = transform(image=image)['image']

    elif augmentation == 'fancy_pca':
        transform = FancyPCA(always_apply=True, alpha=1.0)
        transformed_image = transform(image=image)['image']

    elif augmentation == 'rgb_shift':
        transform = RGBShift(always_apply=True)
        transformed_image = transform(image=image)['image']

    elif augmentation == 'change_color_temperature':
        transform = iaa.ChangeColorTemperature((1100, 10000))
        transformed_image = transform(image=image)

    elif augmentation == 'kmeans_color_quantization':
        transform = iaa.KMeansColorQuantization()
        transformed_image = transform(image=image)

    elif augmentation == 'uniform_color_quantization':
        transform = iaa.UniformColorQuantization()
        transformed_image = transform(image=image)

    elif augmentation == 'channel_shuffle':
        transform = ChannelShuffle(always_apply=True)
        transformed_image = transform(image=image)['image'] 

    ## Contrast
Example #22
    to_colorspace="HSV",
    from_colorspace="RGB",
    children=iaa.WithChannels(0,iaa.Add((0, 50))))
aug40 = iaa.WithHueAndSaturation(
    iaa.WithChannels(0, iaa.Add((0, 50))))
aug41 = iaa.MultiplyHueAndSaturation((0.5, 1.9), per_channel=True)
aug42 = iaa.AddToHueAndSaturation((-50, 50), per_channel=True)
aug43 = iaa.AddToHue((-50, 50))
aug44 = iaa.AddToSaturation((-50, 50))
aug45 = iaa.Sequential([
    iaa.ChangeColorspace(from_colorspace="RGB", to_colorspace="HSV"),
    iaa.WithChannels(0, iaa.Add((50, 100))),
    iaa.ChangeColorspace(from_colorspace="HSV", to_colorspace="RGB")])
    
aug46 = iaa.Grayscale(alpha=(0.0, 1.0))
aug47 = iaa.ChangeColorTemperature((1100, 10000))
aug49 = iaa.UniformColorQuantization()
aug50 = iaa.UniformColorQuantizationToNBits()
aug51 = iaa.GammaContrast((0.5, 2.0), per_channel=True)
aug52 = iaa.SigmoidContrast(gain=(3, 10), cutoff=(0.4, 0.6), per_channel=True)
aug53 = iaa.LogContrast(gain=(0.6, 1.4), per_channel=True)
aug54 = iaa.LinearContrast((0.4, 1.6), per_channel=True)
# aug55 = iaa.AllChannelsCLAHE(clip_limit=(1, 10), per_channel=True)
aug56 = iaa.Alpha((0.0, 1.0), iaa.AllChannelsHistogramEqualization())
aug57 = iaa.HistogramEqualization(
    from_colorspace=iaa.HistogramEqualization.BGR,
    to_colorspace=iaa.HistogramEqualization.HSV)

aug58  = iaa.DirectedEdgeDetect(alpha=(0.0, 0.5), direction=(0.0, 0.5))
aug59 = iaa.Canny(
    alpha=(0.0, 0.3),
seq = iaa.Sequential([
    iaa.Fliplr(p=0),  # p=0: no flip, so this is effectively the original image
    iaa.Sometimes(0.05, iaa.Crop(px=(22, 45), keep_size=True)),  # crop images from each side by 22 to 45px (randomly chosen)
    iaa.Sometimes(0.5, iaa.Fliplr(1)),  # horizontally flip 50% of the images
    iaa.Sometimes(0.02, iaa.GaussianBlur(sigma=(5, 7.0))),  # blur images with a sigma of 5 to 7.0
    iaa.Sometimes(0.02 ,iaa.ImpulseNoise(p=(0.6,1))),
    iaa.Sometimes(0.02 ,iaa.EdgeDetect(alpha=(0.09,1))),
    #iaa.AddToBrightness(add=(100,124)),
    iaa.Sometimes(0.02 ,iaa.Canny(alpha=(0.8,0.9))),
    iaa.Sometimes(0.5 ,iaa.Grayscale(alpha=1.00)),
    iaa.Sometimes(0.5 ,iaa.ChannelShuffle(p=1)),
    #iaa.Sometimes(0.02 ,(iaa.geometric.Affine( scale=2,rotate=22,order=1))),
    iaa.Sometimes(0.5 ,iaa.Cartoon(blur_ksize=(11,13))),
    iaa.Sometimes(0.02 ,iaa.CenterCropToAspectRatio(1)),
    iaa.Sometimes(0.02 ,iaa.CenterCropToFixedSize(100,100)),
    iaa.Sometimes(0.12 ,iaa.ChangeColorTemperature(kelvin=(2222,3333))),
    #iaa.segmentation(),
    iaa.Sometimes(0.12 ,iaa.CLAHE(clip_limit=(4,8))),
    iaa.Sometimes(0.8 ,iaa.Rotate(rotate=(-90,90),order=1))
])

plt.figure(figsize=(12,12))


ls=glob.glob(path)
res=[]
for idx,l in enumerate(ls):
    res.append(Parser.myType(l,idx,classes=['bird','zebra']))


def our_generator(res):
Example #24
    def __init__(self, ColorTemperature=None):
        self.ColorTemperature = ColorTemperature
        self.seq = iaa.Sequential([
            iaa.ChangeColorTemperature((4300, 6000)),
        ])
Example #25
	def ColorTemperature(self):
		temp = iaa.ChangeColorTemperature((1100, 10000))
		image_temp = temp.augment_images([self.image])
		return image_temp[0]
import numpy as np
import cv2
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug.augmentables import Keypoint, KeypointsOnImage

sometimes = lambda aug: iaa.Sometimes(0.5, aug)

KPT_AUGS = [
    iaa.LinearContrast((0.95, 1.05), per_channel=0.25),
    iaa.Add((-10, 10), per_channel=False),
    iaa.GammaContrast((0.95, 1.05)),
    iaa.GaussianBlur(sigma=(0.0, 0.6)),
    iaa.MultiplySaturation((0.95, 1.05)),
    iaa.AddToHueAndSaturation((-255, 255)),
    iaa.ChangeColorTemperature((1000, 20000)),
    iaa.AdditiveGaussianNoise(scale=(0, 0.0125 * 255)),
    iaa.flip.Flipud(0.5),
    sometimes(
        iaa.Affine(
            scale={
                "x": (0.8, 1.2),
                "y": (0.8, 1.2)
            },  # scale images to 80-120% of their size, individually per axis
            translate_percent={
                "x": (-0.25, 0.25),
                "y": (-0.2, 0.2)
            },  # translate by -25 to +25 percent on x and -20 to +20 percent on y
            rotate=(-30, 30),  # rotate by -30 to +30 degrees
            shear=(-10, 10),  # shear by -10 to +10 degrees
            order=[0, 1
Example #27
import numpy as np
from imgaug import augmenters as iaa
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms

from skimage import io
import os

tfs = transforms.Compose([
    iaa.Sequential([
                    iaa.Sometimes(0.5, iaa.Fliplr(1.0)),
                    iaa.Sometimes(0.5, iaa.MultiplyBrightness((0.3, 1.3))),
                    iaa.Sometimes(0.2, iaa.ChangeColorTemperature((4300, 6000))),
                    #iaa.Sometimes(0.9, iaa.Affine(rotate=(-180, 180), shear=(-6, 6))),
    ]).augment_image,
    transforms.ToTensor()
])


class CustomDataset(Dataset):
    def __init__(self, n_images, n_classes=15, transform=None):
        self.images = []
        
        self.transform = transform

        test_path = "/content/gdrive/My Drive/Arirang/data/test/images"
        file_list = os.listdir(test_path)
        file_list_png = [file for file in file_list if file.endswith(".png")]
        
        for idx, filename in enumerate(file_list_png):
            self.images.append( os.path.join(test_path, filename)) 
Example #28
def _write_tfrecord(dataset_split, anno_lines):
    labels_list = []
    width_list = []
    long_label = []
    long_pic = []

    if not os.path.exists(FLAGS.data_dir):
        os.makedirs(FLAGS.data_dir)
    if not FLAGS.aug:
        fix = '.tfrecord'
    else:
        if FLAGS.MotionBlur:
            fix = '_motion_blur.tfrecord'
        if FLAGS.ColorTemp:
            fix = '_color_temp.tfrecord'
        if FLAGS.HUE:
            fix = '_hue.tfrecord'
    tfrecords_path = os.path.join(FLAGS.data_dir, dataset_split + '-' + cfg.Lang + '-' 
                    + time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime(time.time())) + fix)
    # Record tfrecord info: the maximum image width
    tfrecord_info = open(os.path.join(FLAGS.data_dir, 'info.txt'), 'a+', encoding='utf8')
    with tf.python_io.TFRecordWriter(tfrecords_path) as writer:
        for i, line in enumerate(anno_lines):
            line = line.strip()
            image_name, label = parse_textrender_label(line)
            if label == '卍':
                continue
            # If the label contains an illegible-text marker ('###'), filter out the whole line
            if '###' in label:
                continue
            ########## !!!! Temporary workaround, to be removed #############
            if 'ô' in label:
                continue
            ##########################################
            # Arabic labels need special handling
            if cfg.Lang == 'AB':
                label = label[::-1]
            # Maximum label character length; filter out labels longer than this
            if len(label) > FLAGS.max_label_lenth:
                continue
            # Minimum label character length; filter out labels shorter than this
            if len(label) < FLAGS.min_label_lenth:
                continue
            image_path = os.path.join(FLAGS.image_dir, image_name)

            image = cv2.imread(image_path)
            if image is None: 
                continue # skip bad image.

            h, w, c = image.shape
            if w > h:
                height = _IMAGE_HEIGHT
                width = int(w * height / h)
                labels_list.append(len(label))
                width_list.append(width)
                image = cv2.resize(image, (width, height))
                if not FLAGS.aug:
                    is_success, image_buffer = cv2.imencode('.jpg', image)
                else:
                    if FLAGS.MotionBlur:
                        aug = iaa.MotionBlur(k = 10, angle = np.random.randint(-45, 45))
                    if FLAGS.ColorTemp:
                        aug = iaa.ChangeColorTemperature((1100, 10000))
                    if FLAGS.HUE:
                        aug = iaa.MultiplyHueAndSaturation((0.5, 1.5), per_channel=True)
                    img_aug = aug(image = image)
                    is_success, image_buffer = cv2.imencode('.jpg', img_aug)
                if not is_success:
                    continue
                # convert string object to bytes in py3
                image_name = image_name if sys.version_info[0] < 3 else image_name.encode('utf-8') 
                features = tf.train.Features(feature={
                   'labels': _int64_feature(_string_to_int(label)),
                   'images': _bytes_feature(image_buffer.tostring()),
                   'imagenames': _bytes_feature(image_name)
                })
                example = tf.train.Example(features=features)
                writer.write(example.SerializeToString())
                sys.stdout.write('\r>>Writing to {:s}.tfrecords {:d}/{:d}'.format(dataset_split, i + 1, len(anno_lines)))
                sys.stdout.flush()

        sys.stdout.write('\n')
        sys.stdout.write('>> {:s}.tfrecords write finish.\n'.format(dataset_split))
        sys.stdout.flush()
        if width_list:
            tfrecord_info.write('{}: {}\n'.format(tfrecords_path.split('/')[-1], max(width_list)))
    def __iter__(self):
        data = []
        labels = []
        if self.mode == 'train':
            data = self.data_files
            labels = self.label_files
        elif self.mode == 'eval':
            data = self.eval_files
            labels = self.eval_labels
        elif self.mode == 'test':
            data = self.test_files

        data_size = len(data)

        if self.mode == 'test':
            input_batch = torch.zeros([1, 3, self.input_height, self.input_width], dtype=torch.float32)
        else:
            input_batch = torch.zeros([self.batch_size, 3, self.input_height, self.input_width], dtype=torch.float32)
            target_batch = torch.zeros([self.batch_size, 1, self.input_height, self.input_width], dtype=torch.float32)        

        if self.mode == 'test':
            current = 0
            while current < data_size:
                data_image_orig = cv2.imread(data[current])
                data_image_orig = cv2.resize(data_image_orig, (self.input_width, self.input_height), interpolation=cv2.INTER_NEAREST)
                input_batch[0, :, :, :] = self.normalize(data_image_orig)

                yield input_batch, data[current]
                current += 1
        else:
            current = 0
            while current < data_size:
                count = 0
                while count < self.batch_size and current < data_size:
                    # print(data[current])
                    # print(labels[current])
                    data_image_orig = cv2.imread(data[current])
                    label_image_orig = cv2.imread(labels[current], cv2.IMREAD_GRAYSCALE)
                    
                    # Resizing
                    data_image_orig = cv2.resize(data_image_orig, (self.input_width, self.input_height), interpolation=cv2.INTER_NEAREST)
                    # To crop, change to 572 and uncomment the next line
                    # To not crop, use 388 (check the assignment chart again)
                    #label_image_orig = label_image_orig.resize((388,388)) 
                    label_image_orig = cv2.resize(label_image_orig, (self.input_width, self.input_height), interpolation=cv2.INTER_NEAREST)
                    _, label_image_orig = cv2.threshold(label_image_orig, 127, 255, cv2.THRESH_BINARY)        
                    
                    ## AUGMENTATION ##
                    # img_size = np.shape(label_image_orig)
                    # segmap = np.zeros(img_size, dtype=np.uint8)
                    # segmap[:] = label_image_orig
                    # segmap = SegmentationMapOnImage(segmap, shape=img_size)
                    segmap = SegmentationMapsOnImage(label_image_orig, shape=np.shape(label_image_orig))
                    
                    # Augementation pipeline
                    # pipeline = iaa.Sometimes(
                    #                     0.7,
                    pipeline =  iaa.OneOf([
                                    iaa.Affine(scale=(0.5, 1.5)),
                                    iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),
                                    iaa.WithBrightnessChannels(iaa.Add((-50, 50))),
                                    iaa.WithHueAndSaturation(iaa.WithChannels(0, iaa.Add((0, 50)))),
                                    iaa.ChangeColorTemperature((1100, 10000)),
                                    iaa.GammaContrast((0.5, 2.0))
                                ])
                                    # )
                    
                    if random.random() > 0.3:
                        if random.random() > 0.4:
                            data_image_aug, label_image_aug = pipeline(image = data_image_orig, segmentation_maps=segmap)
                            label_image_aug = label_image_aug.get_arr()
                        else:
                            data_image_aug = self.warming_transform(data_image_orig)
                            label_image_aug = label_image_orig
                    else:
                        data_image_aug = data_image_orig
                        label_image_aug = label_image_orig

                    # data_image_aug = data_image_aug.transpose((2, 0, 1))
                    # label_image_aug = np.expand_dims(label_image_aug.get_arr(), axis=0).astype('uint8')
                    input_batch[count, :, :, :] = self.normalize(data_image_aug)

                    # label_image_aug = label_image_aug.get_arr() // 255
                    # label_image_aug = label_image_aug.astype('uint8')
                    # target_batch[count, :, :] = torch.from_numpy(label_image_aug).long()

                    label_image_aug = np.expand_dims(label_image_aug.astype(np.float32) / 255.0, axis=0)
                    target_batch[count, :, :, :] = torch.from_numpy(label_image_aug)

                    count += 1
                    current += 1
                   
                yield input_batch, target_batch