                # sharpen the image
                iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),
                # edge detection: set detected edges to 0 or 255 and overlay them on the original image
                sometimes(
                    iaa.OneOf([
                        iaa.EdgeDetect(alpha=(0, 0.7)),
                        iaa.DirectedEdgeDetect(alpha=(0, 0.7),
                                               direction=(0.0, 1.0)),
                    ])),
                # add gaussian noise
                iaa.AdditiveGaussianNoise(
                    loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
                # set 1% to 5% of the pixels to black,
                # or drop 3% to 10% of the pixels as black rectangles sized 2% to 5% of the original image
                iaa.OneOf([
                    iaa.Dropout((0.01, 0.05), per_channel=0.5),
                    iaa.CoarseDropout((0.03, 0.10),
                                      size_percent=(0.02, 0.05),
                                      per_channel=0.2),
                ]),
                # with 5% probability invert the pixel intensities, i.e. a value v becomes 255-v
                # add a random value between -10 and 10 to each pixel
                iaa.Add((-10, 10), per_channel=0.5),
                # multiply each pixel by a value between 0.8 and 1.2
                iaa.Multiply((0.8, 1.2), per_channel=0.5),

                # change the contrast of the whole image by a factor between 0.5 and 1.5
                iaa.ContrastNormalization((0.5, 1.5), per_channel=0.5),
                # convert RGB to grayscale, multiply by alpha and overlay the result on the original image
                iaa.Grayscale(alpha=(0.0, 0.2)),
                # move pixels locally around; this kind of augmentation is often seen on the MNIST dataset
Example #2
def _create_augment_pipeline():
    from imgaug import augmenters as iaa

    # augmentors by https://github.com/aleju/imgaug
    def sometimes(aug):
        return iaa.Sometimes(0.5, aug)

    # Define our sequence of augmentation steps that will be applied to every image
    # All augmenters with per_channel=0.5 will sample one value _per image_
    # in 50% of all cases. In all other cases they will sample new values
    # _per channel_.
    aug_pipe = iaa.Sequential(
        [
            # apply the following augmenters to most images
            # iaa.Fliplr(0.5), # horizontally flip 50% of all images
            # iaa.Flipud(0.2), # vertically flip 20% of all images
            # sometimes(iaa.Crop(percent=(0, 0.1))), # crop images by 0-10% of their height/width
            sometimes(
                iaa.Affine(
                    # scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis
                    # translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)}, # translate by -20 to +20 percent (per axis)
                    # rotate=(-5, 5), # rotate by -5 to +5 degrees
                    # shear=(-5, 5), # shear by -5 to +5 degrees
                    # order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
                    # cval=(0, 255), # if mode is constant, use a cval between 0 and 255
                    # mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)
                )),
            # execute 0 to 5 of the following (less important) augmenters per image
            # don't execute all of them, as that would often be way too strong
            iaa.SomeOf(
                (0, 5),
                [
                    # sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))), # convert images into their superpixel representation
                    iaa.OneOf([
                        iaa.GaussianBlur(
                            (0, 3.0
                             )),  # blur images with a sigma between 0 and 3.0
                        # blur image using local means with kernel sizes between 2 and 7
                        iaa.AverageBlur(k=(2, 7)),
                        # blur image using local medians with kernel sizes between 3 and 11
                        iaa.MedianBlur(k=(3, 11)),
                    ]),
                    iaa.Sharpen(alpha=(0, 1.0),
                                lightness=(0.75, 1.5)),  # sharpen images
                    # iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)), # emboss images
                    # search either for all edges or for directed edges
                    # sometimes(iaa.OneOf([
                    #    iaa.EdgeDetect(alpha=(0, 0.7)),
                    #    iaa.DirectedEdgeDetect(alpha=(0, 0.7), direction=(0.0, 1.0)),
                    # ])),
                    # add gaussian noise to images
                    iaa.AdditiveGaussianNoise(
                        loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
                    iaa.OneOf([
                        # randomly remove up to 10% of the pixels
                        iaa.Dropout((0.01, 0.1), per_channel=0.5),
                        #iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),
                    ]),
                    # iaa.Invert(0.05, per_channel=True), # invert color channels
                    # change brightness of images (by -10 to 10 of original value)
                    iaa.Add((-10, 10), per_channel=0.5),
                    # change brightness of images (50-150% of original value)
                    iaa.Multiply((0.5, 1.5), per_channel=0.5),
                    # improve or worsen the contrast
                    iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5),
                    #iaa.Grayscale(alpha=(0.0, 1.0)),
                    # sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)), # move pixels locally around (with random strengths)
                    # sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))) # sometimes move parts of the image around
                ],
                random_order=True)
        ],
        random_order=True)
    return aug_pipe
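
A minimal usage sketch (not part of the original snippet): build the pipeline once, then augment each training image with the classic augment_image API.

aug_pipe = _create_augment_pipeline()
image_aug = aug_pipe.augment_image(image)  # 'image' is assumed to be an HxWx3 uint8 numpy array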
Example #3
    sometimes_010(
        iaa.OneOf([
            iaa.LogContrast((0.8, 1.2)),
            iaa.GammaContrast((0.8, 1.2)),
            iaa.LinearContrast((0.8, 1.2)),
            iaa.Alpha((0.0, 1.0), iaa.AllChannelsHistogramEqualization()),
            iaa.Alpha((0.0, 1.0), iaa.HistogramEqualization()),
            iaa.CLAHE(clip_limit=(1, 3)),
            iaa.AllChannelsCLAHE(clip_limit=(1, 3)),
        ]))
])

dropouts = iaa.Sequential([
    sometimes_010(
        iaa.OneOf([
            iaa.Dropout(p=0.01, per_channel=True),
            iaa.Dropout(p=0.01, per_channel=False),
            iaa.Cutout(fill_mode="constant",
                       cval=(0, 255),
                       size=(0.1, 0.4),
                       fill_per_channel=0.5),
            iaa.CoarseDropout((0.0, 0.08),
                              size_percent=(0.02, 0.25),
                              per_channel=0.5),
            iaa.SaltAndPepper(p=0.01, per_channel=True),
            iaa.SaltAndPepper(p=0.01, per_channel=False),
            iaa.AdditiveLaplaceNoise(scale=0.02 * 255, per_channel=True),
            iaa.AdditiveLaplaceNoise(scale=0.02 * 255, per_channel=False),
            iaa.AdditiveGaussianNoise(scale=0.02 * 255, per_channel=True),
            iaa.AdditiveGaussianNoise(scale=0.02 * 255, per_channel=False),
            iaa.AdditivePoissonNoise(lam=4.0, per_channel=True),
Example #4
image = ia.quokka(size=(128, 128))

# Create an example segmentation map (int32, 128x128).
# Here, we just randomly place some squares on the image.
# Class 0 is the background class.
segmap = np.zeros((128, 128), dtype=np.int32)
segmap[28:71, 35:85] = 1
segmap[10:25, 30:45] = 2
segmap[10:25, 70:85] = 3
segmap[10:110, 5:10] = 4
segmap[118:123, 10:110] = 5
segmap = ia.SegmentationMapOnImage(segmap, shape=image.shape, nb_classes=1+5)

# Define our augmentation pipeline.
seq = iaa.Sequential([
    iaa.Dropout([0.05, 0.2]),      # drop 5% or 20% of all pixels
    iaa.Sharpen((0.0, 1.0)),       # sharpen the image
    iaa.Affine(rotate=(-45, 45)),  # rotate by -45 to 45 degrees (affects segmentation maps)
    iaa.ElasticTransformation(alpha=50, sigma=5)  # apply water effect (affects segmentation maps)
], random_order=True)

# Augment images and segmentation maps.
images_aug = []
segmaps_aug = []
for _ in range(5):
    seq_det = seq.to_deterministic()
    images_aug.append(seq_det.augment_image(image))
    segmaps_aug.append(seq_det.augment_segmentation_maps([segmap])[0])

# We want to generate an image of original input images and segmentation maps before/after augmentation.
# It is supposed to have five columns: (1) original image, (2) augmented image,
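
The fragment is cut off here. A minimal sketch (an assumption, using the older SegmentationMapOnImage API from above and imageio for saving) of one way such a five-column grid could be assembled:

import imageio

cells = []
for image_aug, segmap_aug in zip(images_aug, segmaps_aug):
    cells.append(image)                                        # column 1: original image
    cells.append(image_aug)                                    # column 2: augmented image
    cells.append(segmap.draw_on_image(image))                  # column 3: original segmap on the original image
    cells.append(segmap_aug.draw_on_image(image_aug))          # column 4: augmented segmap on the augmented image
    cells.append(segmap_aug.draw(size=image_aug.shape[:2]))    # column 5: augmented segmap on its own

grid_image = ia.draw_grid(cells, cols=5)
imageio.imwrite("example_segmaps.jpg", grid_image)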
Example #5
    def __init__(self):

        configMain.__init__(self)

        st = lambda aug: iaa.Sometimes(0.4, aug)
        oc = lambda aug: iaa.Sometimes(0.3, aug)
        rl = lambda aug: iaa.Sometimes(0.09, aug)
        self.augment = iaa.Sequential(
            [
                rl(iaa.GaussianBlur(
                    (0, 1.5))),  # blur images with a sigma between 0 and 1.5
                rl(
                    iaa.AdditiveGaussianNoise(
                        loc=0, scale=(0.0, 0.05),
                        per_channel=0.5)),  # add gaussian noise to images
                oc(iaa.Dropout((0.0, 0.10), per_channel=0.5)
                   ),  # randomly remove up to X% of the pixels
                oc(
                    iaa.CoarseDropout(
                        (0.0, 0.10), size_percent=(0.08, 0.2), per_channel=0.5)
                ),  # randomly remove up to X% of the pixels
                oc(
                    iaa.Add((-40, 40), per_channel=0.5)
                ),  # change brightness of images (by -X to Y of original value)
                st(iaa.Multiply((0.10, 2.5), per_channel=0.2)
                   ),  # change brightness of images (X-Y% of original value)
                rl(iaa.ContrastNormalization(
                    (0.5, 1.5),
                    per_channel=0.5)),  # improve or worsen the contrast
                rl(iaa.Grayscale((0.0, 1))),  # partially convert to grayscale
            ],
            random_order=True  # do all of the above in random order
        )
        self.augment_labels = True
        self.augment_amount = 1  #3=max, 2=mid, 1=min
        self.labels_to_augment = {
            "road": True,
            "buildings": True,
            "grass": True,
            "sky_n_zebra": True
        }

        # there are files with data, 200 images each, and here we select which ones to use

        #self.dataset_name = 'Carla'
        #with open(os.path.join(self.save_data_stats, 'path'),'r') as f:
        #	path = f.read().strip()

        path = '../VirtualElektraData2'

        train_path = os.path.join(path, 'SeqTrain')
        val_path = os.path.join(path, 'SeqVal')

        print(train_path, val_path)

        self.train_db_path = [
            os.path.join(train_path, f)
            for f in glob.glob1(train_path, "data_*.h5")
        ]
        self.val_db_path = [
            os.path.join(val_path, f)
            for f in glob.glob1(val_path, "data_*.h5")
        ]

        # When using data with noise, remove the recording during the first half of the noise impulse
        # TODO Felipe: change to noise percentage.
        self.remove_noise = False

        # Speed Divide Factor

        #TODO: For now this is hardcoded, but eventually we should be able to calculate it from the data at loading time.
        self.speed_factor = 1.0  # In KM/H. For GTA it should be at most 30.0

        # The division is made by three different data kinds
        # in every mini-batch there will be equal number of samples with labels from each group
        # e.g. for [[0,1],[2]] there will be 50% samples with labels 0 and 1, and 50% samples with label 2
        self.labels_per_division = [[2], [2], [2]]

        self.dataset_names = ['targets']

        self.queue_capacity = 20 * self.batch_size

        # TODO NOT IMPLEMENTED Felipe: True/False switches to turn data balancing on or off
        self.balances_val = True
        self.balances_train = True
        self.augment_and_saturate_factor = True
Example #6
def main():
    random.seed()
    ia.seed(random.randrange(10000))

    bg_images = generate_data.load_dtd(dtd_dir='%s/dtd/images' % data_dir,
                                       dump_it=False)
    #bg_images = [cv2.imread('data/frilly_0007.jpg')]
    background = generate_data.Backgrounds(images=bg_images)

    #card_pool = pd.DataFrame()
    #for set_name in fetch_data.all_set_list:
    #    df = fetch_data.load_all_cards_text('%s/csv/%s.csv' % (data_dir, set_name))
    #    card_pool = card_pool.append(df)
    card_pool = fetch_data.load_all_cards_text('%s/csv/custom.csv' % data_dir)
    class_ids = {}
    with open('%s/obj.names' % data_dir) as names_file:
        class_name_list = names_file.read().splitlines()
        for i in range(len(class_name_list)):
            class_ids[class_name_list[i]] = i
    print(class_ids)

    num_gen = 60000
    num_iter = 1

    for i in range(num_gen):
        # Arbitrarily select the top-left and top-right corners for the perspective transformation.
        # Since the training images are generated with random rotation, there is no need to skew all four sides.
        skew = [[random.uniform(0, 0.25), 0], [0, 1], [1, 1],
                [random.uniform(0.75, 1), 0]]
        generator = ImageGenerator(background.get_random(),
                                   class_ids,
                                   1440,
                                   960,
                                   skew=skew)
        out_name = ''
        for _, card_info in card_pool.sample(random.randint(2, 5)).iterrows():
            img_name = '%s/card_img/png/%s/%s_%s.png' % (
                data_dir, card_info['set'], card_info['collector_number'],
                fetch_data.get_valid_filename(card_info['name']))
            out_name += '%s%s_' % (card_info['set'],
                                   card_info['collector_number'])
            card_img = cv2.imread(img_name)
            if card_img is None:
                fetch_data.fetch_card_image(card_info,
                                            out_dir='%s/card_img/png/%s' %
                                            (data_dir, card_info['set']))
                card_img = cv2.imread(img_name)
            if card_img is None:
                print('WARNING: card %s is not found!' % img_name)
            detected_object_list = generate_data.apply_bounding_box(
                card_img, card_info)
            card = Card(card_img, card_info, detected_object_list)
            generator.add_card(card)
        for j in range(num_iter):
            seq = iaa.Sequential([
                iaa.Multiply((0.8, 1.2)),  # darken / brighten the whole image
                iaa.SimplexNoiseAlpha(first=iaa.Add(random.randrange(64)),
                                      per_channel=0.1,
                                      size_px_max=[3, 6],
                                      upscale_method="cubic"),  # Lighting
                iaa.AdditiveGaussianNoise(scale=random.uniform(0, 0.05) * 255,
                                          per_channel=0.1),  # Noises
                iaa.Dropout(p=[0, 0.05], per_channel=0.1)
            ])

            if i % 3 == 0:
                generator.generate_non_obstructive()
                generator.export_training_data(
                    visibility=0.0,
                    out_name='%s/train/non_obstructive_10/%s%d' %
                    (data_dir, out_name, j),
                    aug=seq)
            elif i % 3 == 1:
                generator.generate_horizontal_span(
                    theta=random.uniform(-math.pi, math.pi))
                generator.export_training_data(
                    visibility=0.0,
                    out_name='%s/train/horizontal_span_10/%s%d' %
                    (data_dir, out_name, j),
                    aug=seq)
            else:
                generator.generate_vertical_span(
                    theta=random.uniform(-math.pi, math.pi))
                generator.export_training_data(
                    visibility=0.0,
                    out_name='%s/train/vertical_span_10/%s%d' %
                    (data_dir, out_name, j),
                    aug=seq)

            #generator.generate_horizontal_span(theta=random.uniform(-math.pi, math.pi))
            #generator.render(display=True, aug=seq, debug=True)
            print('Generated %s%d' % (out_name, j))
            generator.img_bg = background.get_random()
    pass
def GenerateRandomImgaugAugmentation(
        pNbAugmentations=5,  # number of augmentations
        pEnableResizing=True,  # enable scaling
        pScaleFactor=0.5,  # maximum scale factor
        pEnableCropping=True,  # enable cropping
        pCropFactor=0.25,  # maximum crop out size (minimum new size is 1.0-pCropFactor)
        pEnableFlipping1=True,  # enable x flipping
        pEnableFlipping2=True,  # enable y flipping
        pEnableRotation90=True,  # enable rotation
        pEnableRotation=True,  # enable rotation
        pMaxRotationDegree=15,  # maximum rotation degree
        pEnableShearX=True,  # enable x shear
        pEnableShearY=True,  # enable y shear
        pMaxShearDegree=15,  # maximum shear degree
        pEnableDropOut=True,  # enable pixel dropout
        pMaxDropoutPercentage=.1,  # maximum dropout percentage
        pEnableBlur=True,  # enable gaussian blur
        pBlurSigma=.25,  # maximum sigma for gaussian blur
        pEnableSharpness=True,  # enable sharpness
        pSharpnessFactor=.1,  # maximum additional sharpness
        pEnableEmboss=True,  # enable emboss
        pEmbossFactor=.1,  # maximum emboss
        pEnableBrightness=True,  # enable brightness
        pBrightnessFactor=.1,  # maximum +- brightness
        pEnableRandomNoise=True,  # enable random noise
        pMaxRandomNoise=.1,  # maximum random noise strength
        pEnableInvert=False,  # enables color invert
        pEnableContrast=True,  # enable contrast change
        pContrastFactor=.1,  # maximum +- contrast
):

    augmentationMap = []
    augmentationMapOutput = []

    if pEnableResizing:
        if random.Random().randint(0, 1) == 1:
            randomResizeX = 1 - random.Random().random() * pScaleFactor
        else:
            randomResizeX = 1 + random.Random().random() * pScaleFactor
        if random.Random().randint(0, 1) == 1:
            randomResizeY = 1 - random.Random().random() * pScaleFactor
        else:
            randomResizeY = 1 + random.Random().random() * pScaleFactor
        aug = iaa.Resize({"height": randomResizeY, "width": randomResizeX})
        augmentationMap.append(aug)

    if pEnableCropping:
        randomCrop2 = random.Random().random() * pCropFactor
        randomCrop4 = random.Random().random() * pCropFactor
        randomCrop1 = random.Random().random() * pCropFactor
        randomCrop3 = random.Random().random() * pCropFactor
        aug = iaa.Crop(percent=(randomCrop1, randomCrop2, randomCrop3,
                                randomCrop4))
        augmentationMap.append(aug)

    if pEnableFlipping1:
        aug = iaa.Fliplr()
        augmentationMap.append(aug)

    if pEnableFlipping2:
        aug = iaa.Flipud()
        augmentationMap.append(aug)

    if pEnableRotation90:
        randomNumber = random.Random().randint(1, 3)
        aug = iaa.Rot90(randomNumber)
        augmentationMap.append(aug)

    if pEnableRotation:
        if random.Random().randint(0, 1) == 1:
            randomRotation = random.Random().random() * pMaxRotationDegree
        else:
            randomRotation = -random.Random().random() * pMaxRotationDegree
        aug = iaa.Rotate(randomRotation)
        augmentationMap.append(aug)

    if pEnableShearX:
        if random.Random().randint(0, 1) == 1:
            randomShearingX = random.Random().random() * pMaxShearDegree
        else:
            randomShearingX = -random.Random().random() * pMaxShearDegree
        aug = iaa.ShearX(randomShearingX)
        augmentationMap.append(aug)

    if pEnableShearY:
        if random.Random().randint(0, 1) == 1:
            randomShearingY = random.Random().random() * pMaxShearDegree
        else:
            randomShearingY = -random.Random().random() * pMaxShearDegree
        aug = iaa.ShearY(randomShearingY)
        augmentationMap.append(aug)

    if pEnableDropOut:
        randomDropOut = random.Random().random() * pMaxDropoutPercentage
        aug = iaa.Dropout(p=randomDropOut, per_channel=False)
        augmentationMap.append(aug)

    if pEnableBlur:
        randomBlur = random.Random().random() * pBlurSigma
        aug = iaa.GaussianBlur(randomBlur)
        augmentationMap.append(aug)

    if pEnableSharpness:
        randomSharpness = random.Random().random() * pSharpnessFactor
        aug = iaa.Sharpen(randomSharpness)
        augmentationMap.append(aug)

    if pEnableEmboss:
        randomEmboss = random.Random().random() * pEmbossFactor
        aug = iaa.Emboss(randomEmboss)
        augmentationMap.append(aug)

    if pEnableBrightness:
        if random.Random().randint(0, 1) == 1:
            randomBrightness = 1 - random.Random().random() * pBrightnessFactor
        else:
            randomBrightness = 1 + random.Random().random() * pBrightnessFactor
        aug = iaa.Add(randomBrightness)
        augmentationMap.append(aug)

    if pEnableRandomNoise:
        if random.Random().randint(0, 1) == 1:
            randomNoise = 1 - random.Random().random() * pMaxRandomNoise
        else:
            randomNoise = 1 + random.Random().random() * pMaxRandomNoise
        aug = iaa.MultiplyElementwise(randomNoise, per_channel=True)
        augmentationMap.append(aug)

    if pEnableInvert:
        aug = iaa.Invert(1)
        augmentationMap.append(aug)

    if pEnableContrast:
        if random.Random().randint(0, 1) == 1:
            randomContrast = 1 - random.Random().random() * pContrastFactor
        else:
            randomContrast = 1 + random.Random().random() * pContrastFactor
        aug = iaa.contrast.LinearContrast(randomContrast)
        augmentationMap.append(aug)

    widthFactor = 1
    heightFactor = 1

    arr = numpy.arange(0, len(augmentationMap))
    numpy.random.shuffle(arr)

    switchWidthHeight = False
    for i in range(pNbAugmentations):
        augmentationMapOutput.append(augmentationMap[arr[i]])
        if arr[i] == 0:
            widthFactor *= randomResizeX
            heightFactor *= randomResizeY
        if arr[i] == 1:
            widthFactor *= (1.0 - (randomCrop2 + randomCrop4))
            heightFactor *= (1.0 - (randomCrop1 + randomCrop3))
        if arr[i] == 4:
            if randomNumber == 1 or randomNumber == 3:
                switchWidthHeight = True

    return iaa.Sequential(
        augmentationMapOutput), widthFactor, heightFactor, switchWidthHeight
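
A brief usage sketch (assumed, not part of the original): build one random pipeline and apply it to an image; the returned factors can then be used to rescale annotation coordinates.

seq, width_factor, height_factor, switched = GenerateRandomImgaugAugmentation(pNbAugmentations=3)
image_aug = seq.augment_image(image)  # 'image' is assumed to be an HxWx3 uint8 numpy array
# width_factor/height_factor approximate the size change caused by Resize/Crop,
# and 'switched' flags whether a 90/270 degree rotation swapped width and height.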
    flip_ud = iaa.Flipud(1.0)
    img = flip_ud.augment_image(image)
    cv2.imwrite('dataset/transform/' + file_name + '-flip-ud' + file_extension,
                img)

    affine = iaa.Affine(rotate=(-90))
    img = affine.augment_image(image)
    cv2.imwrite('dataset/transform/' + file_name + '-affine' + file_extension,
                img)

    avg_blur = iaa.AverageBlur(k=(3, 3))
    img = avg_blur.augment_image(image)
    cv2.imwrite(
        'dataset/transform/' + file_name + '-avg-blur' + file_extension, img)

    dropout = iaa.Dropout(p=(0, 0.1))
    img = dropout.augment_image(image)
    cv2.imwrite('dataset/transform/' + file_name + '-dropout' + file_extension,
                img)


def img_func(images, random_state, parents, hooks):
    for img in images:
        img[::4] = 0
    return images


def keypoint_func(keypoints_on_images, random_state, parents, hooks):
    return keypoints_on_images
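
These two callbacks have the signatures expected by iaa.Lambda. A minimal wiring sketch (assumed usage): blacken every fourth row of each image while leaving keypoints untouched.

import imgaug.augmenters as iaa

aug = iaa.Lambda(func_images=img_func, func_keypoints=keypoint_func)
images_aug = aug.augment_images(images)  # 'images' is assumed to be a list of HxWxC uint8 arrays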

Example #9
    def __init__(self,
                 list_file,
                 train,
                 transform,
                 device,
                 little_train=False,
                 with_file_path=False,
                 C=20,
                 test_mode=False):
        print('data init')

        self.train = train
        self.transform = transform
        self.fnames = []
        self.boxes = []
        self.labels = []
        self.resize = 416
        self.C = C
        self.device = device
        self._test = test_mode
        self.with_file_path = with_file_path
        self.img_augsometimes = lambda aug: iaa.Sometimes(0.25, aug)
        self.bbox_augsometimes = lambda aug: iaa.Sometimes(0.5, aug)

        self.augmentation = iaa.Sequential(
            [
                # augment without change bboxes
                self.img_augsometimes(
                    iaa.SomeOf(
                        (1, 3),
                        [
                            iaa.Dropout([0.05, 0.2
                                         ]),  # drop 5% or 20% of all pixels
                            iaa.Sharpen((0.1, .8)),  # sharpen the image
                            # iaa.GaussianBlur(sigma=(2., 3.5)),
                            iaa.OneOf([
                                iaa.GaussianBlur(sigma=(2., 3.5)),
                                iaa.AverageBlur(k=(2, 5)),
                                iaa.BilateralBlur(d=(7, 12),
                                                  sigma_color=(10, 250),
                                                  sigma_space=(10, 250)),
                                iaa.MedianBlur(k=(3, 7)),
                            ]),
                            iaa.AddElementwise((-50, 50)),
                            iaa.AdditiveGaussianNoise(scale=(0, 0.1 * 255)),
                            iaa.JpegCompression(compression=(80, 95)),
                            iaa.Multiply((0.5, 1.5)),
                            iaa.MultiplyElementwise((0.5, 1.5)),
                            iaa.ReplaceElementwise(0.05, [0, 255]),
                            # iaa.WithColorspace(to_colorspace="HSV", from_colorspace="RGB",
                            #                 children=iaa.WithChannels(2, iaa.Add((-10, 50)))),
                            iaa.OneOf([
                                iaa.WithColorspace(to_colorspace="HSV",
                                                   from_colorspace="RGB",
                                                   children=iaa.WithChannels(
                                                       1, iaa.Add((-10, 50)))),
                                iaa.WithColorspace(to_colorspace="HSV",
                                                   from_colorspace="RGB",
                                                   children=iaa.WithChannels(
                                                       2, iaa.Add((-10, 50)))),
                            ]),
                        ],
                        random_order=True)),
                iaa.Fliplr(.5),
                iaa.Flipud(.125),
                # # augment changing bboxes
                self.bbox_augsometimes(
                    iaa.Affine(
                        # translate_px={"x": 40, "y": 60},
                        scale={
                            "x": (0.8, 1.2),
                            "y": (0.8, 1.2)
                        },
                        translate_percent={
                            "x": (-0.1, 0.1),
                            "y": (-0.1, 0.1)
                        },
                        rotate=(-5, 5),
                    ))
            ],
            random_order=True)

        # torch.manual_seed(23)
        with open(list_file) as f:
            lines = f.readlines()

        if little_train:
            lines = lines[:little_train]

        for line in lines:
            splited = line.strip().split()
            self.fnames.append(splited[0])

        self.num_samples = len(self.fnames)
def augmentation(image, mask=None):
    sometimes = lambda aug: iaa.Sometimes(0.5, aug)

    seq = iaa.Sequential(
        [
            iaa.Fliplr(0.5),
            iaa.Flipud(0.2),
            sometimes(
                iaa.CropAndPad(
                    percent=(-0.05, 0.1), pad_mode=ia.ALL, pad_cval=(0, 255))),
            sometimes(
                iaa.Affine(scale={
                    "x": (0.8, 1.2),
                    "y": (0.8, 1.2)
                },
                           translate_percent={
                               "x": (-0.2, 0.2),
                               "y": (-0.2, 0.2)
                           },
                           rotate=(-45, 45),
                           shear=(-16, 16),
                           order=[0, 1],
                           cval=(0, 255),
                           mode=ia.ALL)),
            # execute 0 to 5 of the following (less important) augmenters per image
            iaa.SomeOf((0, 5), [
                sometimes(
                    iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))),
                iaa.OneOf([
                    iaa.GaussianBlur((0, 3.0)),
                    iaa.AverageBlur(k=(2, 7)),
                    iaa.MedianBlur(k=(3, 11)),
                ]),
                iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),
                iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)),
                iaa.SimplexNoiseAlpha(
                    iaa.OneOf([
                        iaa.EdgeDetect(alpha=(0.5, 1.0)),
                        iaa.DirectedEdgeDetect(alpha=(0.5, 1.0),
                                               direction=(0.0, 1.0)),
                    ])),
                iaa.AdditiveGaussianNoise(
                    loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
                iaa.OneOf([
                    iaa.Dropout((0.01, 0.1), per_channel=0.5),
                    iaa.CoarseDropout((0.03, 0.15),
                                      size_percent=(0.02, 0.05),
                                      per_channel=0.2),
                ]),
                iaa.Invert(0.05, per_channel=True),
                iaa.Add((-10, 10), per_channel=0.5),
                iaa.AddToHueAndSaturation((-20, 20)),
                iaa.OneOf([
                    iaa.Multiply((0.5, 1.5), per_channel=0.5),
                ]),
                iaa.Grayscale(alpha=(0.0, 1.0)),
                sometimes(
                    iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)),
                sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))),
                sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1)))
            ],
                       random_order=True)
        ],
        random_order=True)
    if mask is None:
        image_heavy = seq(images=image)
        return image_heavy
    else:
        if image.ndim == 4:
            mask = np.array(mask)
            image_heavy, mask_heavy = seq(images=image,
                                          segmentation_maps=mask.astype(
                                              np.int32))
        else:
            image_heavy, mask_heavy = seq(images=image[np.newaxis, ...],
                                          segmentation_maps=mask[np.newaxis,
                                                                 ...])
            image_heavy, mask_heavy = image_heavy[0], mask_heavy[0]
        return image_heavy, mask_heavy
Example #11
    plt.title("masks")
    plt.imshow(np.array(mask, dtype=np.float32))


# load some images and corresponding masks from the training data (different size)
x = np.array([cv2.imread("images/" + str(i) + ".png") for i in range(0, 8)])
y = np.array([cv2.imread("masks/" + str(i) + ".png") for i in range(0, 8)])

visualizeImageAndMask(x[0], y[0])

# Example for the case where the images have different sizes.

# define the augmentation
seq1 = iaa.Sequential([iaa.Affine(rotate=(-30, 30))
                       ])  # Augmentation for images and masks
seq2 = iaa.Sequential([iaa.Dropout([0.1, 0.5])])  # Augmentation for images
"""
    Method 1
"""

# This feels clumsy because we have to define separate pipelines and call the augmenters by hand.
# However, it gives us direct access to each image.
for img, mask in zip(x, y):
    seq1.deterministic = True

    image_augmented = seq1.augment_image(image=img)
    final_image = seq2.augment_image(image=image_augmented)

    mask_augmented = seq1.augment_image(image=mask)

    visualizeImageAndMask(final_image, mask_augmented)
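
An alternative sketch (an assumption, requiring the newer SegmentationMapsOnImage API rather than the code above): pass the mask alongside the image in a single call, so geometric augmenters (Affine) transform both while pixel-level augmenters (Dropout) only touch the image.

import numpy as np
import imgaug.augmenters as iaa
from imgaug.augmentables.segmaps import SegmentationMapsOnImage

seq = iaa.Sequential([iaa.Affine(rotate=(-30, 30)), iaa.Dropout([0.1, 0.5])])

for img, mask in zip(x, y):
    segmap = SegmentationMapsOnImage(mask.astype(np.int32), shape=img.shape)
    img_aug, segmap_aug = seq(image=img, segmentation_maps=segmap)
    visualizeImageAndMask(img_aug, segmap_aug.get_arr())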
args = sys.argv

if len(args) != 5:
    print("変数指定して。1は牌の個数、2はサイズ,3ha kosuu,4ha test kosuu")

NUMBER = int(args[1])  # number of tiles per image

size_x = int(args[2])
size_y = size_x

pic_num = int(args[3])
test_num = int(args[4])

# In[3]:

aug1 = iaa.Dropout(p=0.2)
aug2 = iaa.AverageBlur(k=(5, 15))
aug3 = iaa.Add((-40, 40), per_channel=0.5)
aug4 = iaa.ContrastNormalization((0.5, 1.5), per_channel=0.5)
aug5 = iaa.Affine(rotate=(0, 20))

# In[4]:


# image augmentation for robustness
def augment(img, bb, aug):
    # transform the image and its bounding boxes
    aug_img = aug.augment_image(img)
    aug_bb = aug.augment_bounding_boxes(
        [bb])[0].remove_out_of_image().cut_out_of_image()
    '''
Example #13
def main():
    '''
	Dirty image augmentation to get better recognition for systems presentation.

    '''
    augs = [
        "superpixel", "colorspace", "grayscale", "gaussian_blur",
        "average_blur", "median_blur", "edge_detect", "add", "add_eltwise",
        "invert", "contrast_norm", "dropout"
    ]

    superpixel = iaa.Superpixels(p_replace=(0.4, 0.6), n_segments=(16, 64))
    #colorspace    = iaa.Sequential([iaa.ChangeColorspace(from_colorspace="BGR",  to_colorspace="HSV"), iaa.WithChannels(0, iaa.Add(-50, 50), iaa.ChangeColorspace(from_colorspace="BGR", to_colorspace="BGR")])
    grayscale = iaa.Grayscale(alpha=(0.0, 1.0))
    gaussian_blur = iaa.GaussianBlur(sigma=(0.0, 3.0))
    average_blur = iaa.AverageBlur(k=(2, 10))
    median_blur = iaa.MedianBlur(k=(5, 11))
    edge_detect = iaa.EdgeDetect(alpha=(0.0, 1.0))
    add = iaa.Add((-50, 50), per_channel=0.5)
    add_eltwise = iaa.AddElementwise((-50, 50), per_channel=0.5)
    invert = iaa.Invert(0.25, per_channel=0.5)
    contrast_norm = iaa.ContrastNormalization((0.5, 1.5), per_channel=0.5)
    dropout = iaa.Dropout(p=(0, 0.3), per_channel=0.5)

    image_paths = get_files("JPG")
    cv_images = get_images(image_paths)

    for augmentation in augs:
        if augmentation == "superpixel":
            aug_images = superpixel.augment_images(cv_images)
            save_augmented_images("superpixel", aug_images, image_paths)

        #elif augmentation == "colorspace":
        #   aug_images = colorspace.augment_images(cv_images)
        #    save_augmented_images("colorspace", aug_images, image_paths)

        elif augmentation == "grayscale":
            aug_images = grayscale.augment_images(cv_images)
            save_augmented_images("grayscale", aug_images, image_paths)

        elif augmentation == "gaussian_blur":
            aug_images = gaussian_blur.augment_images(cv_images)
            save_augmented_images("gaussian_blur", aug_images, image_paths)

        elif augmentation == "average_blur":
            aug_images = average_blur.augment_images(cv_images)
            save_augmented_images("average_blur", aug_images, image_paths)

        elif augmentation == "edge_detect":
            aug_images = edge_detect.augment_images(cv_images)
            save_augmented_images("edge_detect", aug_images, image_paths)

        elif augmentation == "add":
            aug_images = add.augment_images(cv_images)
            save_augmented_images("add", aug_images, image_paths)

        elif augmentation == "add_eltwise":
            aug_images = add_eltwise.augment_images(cv_images)
            save_augmented_images("add_eltwise", aug_images, image_paths)

        elif augmentation == "invert":
            aug_images = invert.augment_images(cv_images)
            save_augmented_images("invert", aug_images, image_paths)

        elif augmentation == "contrast_norm":
            aug_images = contrast_norm.augment_images(cv_images)
            save_augmented_images("contrast_norm", aug_images, image_paths)

        elif augmentation == "dropout":
            aug_images = dropout.augment_images(cv_images)
            save_augmented_images("dropout", aug_images, image_paths)
Example #14
from pycocotools.coco import COCO
import torch

import imgaug as ia
import imgaug.augmenters as iaa

aug_seq = iaa.Sequential([
    iaa.AdditiveGaussianNoise(scale=(0, 0.025 * 255)),
    iaa.MultiplyHue((0.75, 1.25)),
    iaa.Add((-80, 80))
])
heavy_aug_seq = iaa.Sequential([
    iaa.AdditiveGaussianNoise(scale=(0, 0.025 * 255)),
    iaa.MultiplyHue((0.75, 1.25)),
    iaa.Add((-80, 80)),
    iaa.Dropout(p=(0, 0.3)),
    iaa.imgcorruptlike.MotionBlur(severity=(1, 3)),
    iaa.GammaContrast((0.0, 2.0), per_channel=True)
])
category_map = ['background', 'person']


def load_coco_json_lines(fpath, image_folder=None):
    cocoGt = COCO(fpath)
    records = []
    for image_id in cocoGt.getImgIds():
        img_file = cocoGt.loadImgs(ids=[image_id])[0]
        record = {
            'ID': img_file['file_name'].split('.')[0],
            'height': img_file['height'],
            'width': img_file['width'],
Example #15
    def __init__(self,
                 images,
                 config,
                 shuffle=True,
                 jitter=True,
                 norm=None,
                 flipflop=True,
                 shoechanger=True,
                 zeropad=True):
        self.generator = None

        self.flipflop = flipflop
        self.shoechanger = shoechanger
        if self.flipflop or self.shoechanger:
            self.badshoes = []
            for im in os.listdir('imgs/more_badshoes'):
                self.badshoes.append(cv2.imread('imgs/more_badshoes/' + im))

        self.zeropad = zeropad

        self.images = images
        self.config = config

        self.shuffle = shuffle
        self.jitter = jitter
        self.norm = norm

        self.anchors = [
            BoundBox(0, 0, config['ANCHORS'][2 * i],
                     config['ANCHORS'][2 * i + 1])
            for i in range(int(len(config['ANCHORS']) // 2))
        ]

        ### augmentors by https://github.com/aleju/imgaug
        sometimes = lambda aug: iaa.Sometimes(0.5, aug)

        # Define our sequence of augmentation steps that will be applied to every image
        # All augmenters with per_channel=0.5 will sample one value _per image_
        # in 50% of all cases. In all other cases they will sample new values
        # _per channel_.
        self.aug_pipe = iaa.Sequential(
            [
                # apply the following augmenters to most images
                # iaa.Fliplr(0.5), # horizontally flip 50% of all images
                # iaa.Flipud(0.2), # vertically flip 20% of all images
                # sometimes(iaa.Crop(percent=(0, 0.1))), # crop images by 0-10% of their height/width
                sometimes(
                    iaa.Affine(
                        # scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis
                        # translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)}, # translate by -20 to +20 percent (per axis)
                        # rotate=(-5, 5), # rotate by -5 to +5 degrees
                        # shear=(-5, 5), # shear by -5 to +5 degrees
                        # order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
                        # cval=(0, 255), # if mode is constant, use a cval between 0 and 255
                        # mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)
                    )),
                # execute 0 to 5 of the following (less important) augmenters per image
                # don't execute all of them, as that would often be way too strong
                iaa.SomeOf(
                    (0, 5),
                    [
                        # sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))), # convert images into their superpixel representation
                        iaa.OneOf([
                            iaa.GaussianBlur(
                                (0, 3.0)
                            ),  # blur images with a sigma between 0 and 3.0
                            iaa.AverageBlur(k=(2, 7)),
                            # blur image using local means with kernel sizes between 2 and 7
                            iaa.MedianBlur(k=(3, 11)),
                            # blur image using local medians with kernel sizes between 3 and 11
                        ]),
                        iaa.Sharpen(alpha=(0, 1.0),
                                    lightness=(0.75, 1.5)),  # sharpen images
                        # iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)), # emboss images
                        # search either for all edges or for directed edges
                        # sometimes(iaa.OneOf([
                        #    iaa.EdgeDetect(alpha=(0, 0.7)),
                        #    iaa.DirectedEdgeDetect(alpha=(0, 0.7), direction=(0.0, 1.0)),
                        # ])),
                        iaa.AdditiveGaussianNoise(
                            loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
                        # add gaussian noise to images
                        iaa.OneOf([
                            iaa.Dropout(
                                (0.01, 0.1), per_channel=0.5
                            ),  # randomly remove up to 10% of the pixels
                            # iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),
                        ]),
                        # iaa.Invert(0.05, per_channel=True), # invert color channels
                        iaa.Add((-10, 10), per_channel=0.5),
                        # change brightness of images (by -10 to 10 of original value)
                        iaa.Multiply((0.5, 1.5), per_channel=0.5),
                        # change brightness of images (50-150% of original value)
                        iaa.ContrastNormalization(
                            (0.5, 2.0),
                            per_channel=0.5),  # improve or worsen the contrast
                        # iaa.Grayscale(alpha=(0.0, 1.0)),
                        # sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)), # move pixels locally around (with random strengths)
                        # sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))) # sometimes move parts of the image around
                    ],
                    random_order=True)
            ],
            random_order=True)

        if shuffle:
            np.random.shuffle(self.images)
    def __init__(self):
        configMain.__init__(self)

        st = lambda aug: iaa.Sometimes(0.2, aug)
        oc = lambda aug: iaa.Sometimes(0.1, aug)
        rl = lambda aug: iaa.Sometimes(0.05, aug)

        self.augment = [
            iaa.Sequential(
                [
                    rl(iaa.GaussianBlur(
                        (0,
                         1.3))),  # blur images with a sigma between 0 and 1.3
                    rl(
                        iaa.AdditiveGaussianNoise(
                            loc=0, scale=(0.0, 0.05),
                            per_channel=0.5)),  # add gaussian noise to images
                    rl(iaa.Dropout((0.0, 0.10), per_channel=0.5)
                       ),  # randomly remove up to X% of the pixels
                    oc(
                        iaa.Add((-20, 20), per_channel=0.5)
                    ),  # change brightness of images (by -X to Y of original value)
                    st(iaa.Multiply(
                        (0.25, 2.5), per_channel=0.2
                    )),  # change brightness of images (X-Y% of original value)
                    rl(iaa.ContrastNormalization(
                        (0.5, 1.5),
                        per_channel=0.5)),  # improve or worsen the contrast
                    rl(iaa.Grayscale((0.0, 1))),  # partially convert to grayscale
                ],
                random_order=True  # do all of the above in random order
            )
        ] * 3

        all_files = []
        self.val_db_path = []
        ids = [
            "steer103_v5_way_v2", 'steer103_v5_way_v2_town02',
            "nonoise_town03_way", "nonoise_town02_way", "nonoise_town04_way"
        ]

        for id in ids:
            all_files += glob.glob(
                "/data/yang/code/aws/scratch/carla_collect/" + id +
                "/*/data_*.h5")

            for valid in range(1, 15, 3):
                self.val_db_path += glob.glob(
                    "/data/yang/code/aws/scratch/carla_collect/" + id +
                    "/*WeatherId=" + str(valid).zfill(2) + "/data_*.h5")

        self.train_db_path = list(set(all_files) - set(self.val_db_path))

        self.speed_factor = 40.0  # In KM/H

        # The division is made by three different data kinds
        # in every mini-batch there will be equal number of samples with labels from each group
        # e.g. for [[0,1],[2]] there will be 50% samples with labels 0 and 1, and 50% samples with label 2
        self.labels_per_division = [[0, 2, 5], [3], [4]]
        self.dataset_names = ['targets']
        self.queue_capacity = 5  # now measured in how many batches
def _get_aug():

    import imgaug as ia
    from imgaug import augmenters as iaa

    sometimes = lambda aug: iaa.Sometimes(0.5, aug)

    return iaa.Sequential(
        [
            # apply the following augmenters to most images
            iaa.Fliplr(0.5), # horizontally flip 50% of all images
            iaa.Flipud(0.2), # vertically flip 20% of all images
            # crop images by -5% to 10% of their height/width
            sometimes(iaa.CropAndPad(
                percent=(-0.05, 0.1),
                pad_mode=ia.ALL,
                pad_cval=(0, 255)
            )),
            sometimes(iaa.Affine(
                scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis
                translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)}, # translate by -20 to +20 percent (per axis)
                rotate=(-45, 45), # rotate by -45 to +45 degrees
                shear=(-16, 16), # shear by -16 to +16 degrees
                order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
                cval=(0, 255), # if mode is constant, use a cval between 0 and 255
                mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)
            )),
            # execute 0 to 5 of the following (less important) augmenters per image
            # don't execute all of them, as that would often be way too strong
            iaa.SomeOf((0, 5),
                [
                    sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))), # convert images into their superpixel representation
                    iaa.OneOf([
                        iaa.GaussianBlur((0, 3.0)), # blur images with a sigma between 0 and 3.0
                        iaa.AverageBlur(k=(2, 7)), # blur image using local means with kernel sizes between 2 and 7
                    iaa.MedianBlur(k=(3, 11)), # blur image using local medians with kernel sizes between 3 and 11
                    ]),
                    iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)), # sharpen images
                    iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)), # emboss images
                    # search either for all edges or for directed edges,
                    # blend the result with the original image using a blobby mask
                    iaa.SimplexNoiseAlpha(iaa.OneOf([
                        iaa.EdgeDetect(alpha=(0.5, 1.0)),
                        iaa.DirectedEdgeDetect(alpha=(0.5, 1.0), direction=(0.0, 1.0)),
                    ])),
                    iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5), # add gaussian noise to images
                    iaa.OneOf([
                        iaa.Dropout((0.01, 0.1), per_channel=0.5), # randomly remove up to 10% of the pixels
                        iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),
                    ]),
                    iaa.Invert(0.05, per_channel=True), # invert color channels
                    iaa.Add((-10, 10), per_channel=0.5), # change brightness of images (by -10 to 10 of original value)
                    iaa.AddToHueAndSaturation((-20, 20)), # change hue and saturation
                    # either change the brightness of the whole image (sometimes
                    # per channel) or change the brightness of subareas
                    iaa.OneOf([
                        iaa.Multiply((0.5, 1.5), per_channel=0.5),
                        iaa.FrequencyNoiseAlpha(
                            exponent=(-4, 0),
                            first=iaa.Multiply((0.5, 1.5), per_channel=True),
                            second=iaa.ContrastNormalization((0.5, 2.0))
                        )
                    ]),
                    iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5), # improve or worsen the contrast
                    iaa.Grayscale(alpha=(0.0, 1.0)),
                    sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)), # move pixels locally around (with random strengths)
                    sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))), # sometimes move parts of the image around
                    sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1)))
                ],
                random_order=True
            )
        ],
        random_order=True
    )
Example #18
            iaa.Pad(percent=0.125, keep_size=False),
            iaa.Crop(percent=0.10, keep_size=False)
        ]),
        iaa.ContrastNormalization((0.8, 1.2)),
        iaa.Grayscale(alpha=(0.93, 1.0)),
        iaa.Affine(translate_percent={
            "x": (-0.2, 0.2),
            "y": (-0.2, 0.2)
        }),

        # Flip Right and Left
        iaa.Fliplr(0.5),
        # Rotate by a fixed 10 degrees
        iaa.Affine(rotate=10),
        # Similar to Cutout
        iaa.Dropout(p=(0.25)),
    ],
    name='Cutout')  # apply augmenters in random order

fn = 'config/resnet18_num_0.json'

config = dict(json.load(open(fn, 'r')))

aug_func = seq_cutout.augment
BASE_DIR = 'tiny-imagenet-200'
val_df = pd.read_csv('tiny-imagenet-200/val_annotations.txt',
                     sep='\t',
                     header=None,
                     usecols=[0, 1],
                     names=['filename', 'class'])
Example #19
def run_seq():
    # Sometimes(0.5, ...) applies the given augmenter in 50% of all cases,
    # e.g. Sometimes(0.5, GaussianBlur(0.3)) would blur roughly every second
    # image.
    # if not isinstance(images,list):
    #     images=[images]

    sometimes = lambda aug: iaa.Sometimes(0.5, aug)

    # Define our sequence of augmentation steps that will be applied to every image.
    seq = iaa.Sequential(
        [
            #
            # Apply the following augmenters to most images.
            #
            # iaa.Fliplr(0.5),  # horizontally flip 50% of all images
            # iaa.Flipud(0.2),  # vertically flip 20% of all images

            # crop some of the images by 0-10% of their height/width
            # sometimes(iaa.Crop(percent=(0, 0.1))),
            # sometimes(iaa.Crop(percent=(0, 0.05))),

            # Apply affine transformations to some of the images
            # - scale to 80-120% of image height/width (each axis independently)
            # - translate by -20 to +20 relative to height/width (per axis)
            # - rotate by -45 to +45 degrees
            # - shear by -16 to +16 degrees
            # - order: use nearest neighbour or bilinear interpolation (fast)
            # - mode: use any available mode to fill newly created pixels
            #         see API or scikit-image for which modes are available
            # - cval: if the mode is constant, then use a random brightness
            #         for the newly created pixels (e.g. sometimes black,
            #         sometimes white)
            sometimes(
                iaa.Affine(
                    scale={
                        "x": (0.8, 1.2),
                        "y": (0.8, 1.2)
                    },
                    translate_percent={
                        "x": (-0.2, 0.2),
                        "y": (-0.2, 0.2)
                    },
                    # rotate=(-45, 45),
                    rotate=(-5, 5),
                    # shear=(-16, 16),
                    shear=(-5, 5),
                    order=[0, 1],
                    # cval=(0, 255),
                    cval=144,  # fill value for newly created pixels
                    # mode=ia.ALL  # by default, borders are padded with a constant value
                )),

            #
            # Execute 0 to 5 of the following (less important) augmenters per
            # image. Don't execute all of them, as that would often be way too
            # strong.
            #
            iaa.SomeOf(
                (0, 5),
                [
                    # Convert some images into their superpixel representation,
                    # sample between 20 and 200 superpixels per image, but do
                    # not replace all superpixels with their average, only
                    # some of them (p_replace).
                    sometimes(
                        iaa.Superpixels(p_replace=(0, 1.0),
                                        n_segments=(20, 200))),

                    # Blur each image with varying strength using
                    # gaussian blur (sigma between 0 and 3.0),
                    # average/uniform blur (kernel size between 2x2 and 7x7)
                    # median blur (kernel size between 3x3 and 11x11).
                    iaa.OneOf([
                        iaa.GaussianBlur((0, 3.0)),
                        iaa.AverageBlur(k=(2, 7)),
                        iaa.MedianBlur(k=(3, 11)),
                    ]),

                    # Sharpen each image, overlay the result with the original
                    # image using an alpha between 0 (no sharpening) and 1
                    # (full sharpening effect).
                    iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),

                    # Same as sharpen, but for an embossing effect.
                    iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)),

                    # Search in some images either for all edges or for
                    # directed edges. These edges are then marked in a black
                    # and white image and overlayed with the original image
                    # using an alpha of 0 to 0.7.
                    sometimes(
                        iaa.OneOf([
                            iaa.EdgeDetect(alpha=(0, 0.7)),
                            iaa.DirectedEdgeDetect(alpha=(0, 0.7),
                                                   direction=(0.0, 1.0)),
                        ])),

                    # Add gaussian noise to some images.
                    # In 50% of these cases, the noise is randomly sampled per
                    # channel and pixel.
                    # In the other 50% of all cases it is sampled once per
                    # pixel (i.e. brightness change).
                    iaa.AdditiveGaussianNoise(
                        loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),

                    # Either drop randomly 1 to 10% of all pixels (i.e. set
                    # them to black) or drop them on an image with 2-5% percent
                    # of the original size, leading to large dropped
                    # rectangles.
                    iaa.OneOf([
                        iaa.Dropout((0.01, 0.1), per_channel=0.5),
                        iaa.CoarseDropout((0.03, 0.15),
                                          size_percent=(0.02, 0.05),
                                          per_channel=0.2),
                    ]),

                    # Invert each image's channel with 5% probability.
                    # This sets each pixel value v to 255-v.
                    iaa.Invert(0.05,
                               per_channel=True),  # invert color channels

                    # Add a value of -10 to 10 to each pixel.
                    iaa.Add((-10, 10), per_channel=0.5),

                    # Change brightness of images (50-150% of original value).
                    iaa.Multiply((0.5, 1.5), per_channel=0.5),

                    # Improve or worsen the contrast of images.
                    iaa.LinearContrast((0.5, 2.0), per_channel=0.5),

                    # Convert each image to grayscale and then overlay the
                    # result with the original with random alpha. I.e. remove
                    # colors with varying strengths.
                    iaa.Grayscale(alpha=(0.0, 1.0)),

                    # In some images move pixels locally around (with random
                    # strengths).
                    sometimes(
                        iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)
                    ),

                    # In some images distort local areas with varying strength.
                    sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05)))
                ],
                # do all of the above augmentations in random order
                random_order=True)
        ],
        # do all of the above augmentations in random order
        random_order=True)

    # images_aug = seq(images=images)

    return seq
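
# A minimal usage sketch (assumed caller code, not part of the original
# function): the returned pipeline is applied to a batch of uint8 images.
#
#     import numpy as np
#     seq = ...  # the pipeline returned above
#     images = np.random.randint(0, 255, (8, 128, 128, 3), dtype=np.uint8)
#     images_aug = seq.augment_images(images)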
Example #20
def main():
    parser = argparse.ArgumentParser(description="Check augmenters visually.")
    parser.add_argument(
        '--only',
        default=None,
        help=
        "If this is set, then only the results of an augmenter with this name will be shown. Optionally, comma-separated list.",
        required=False)
    args = parser.parse_args()

    images = [
        ia.quokka_square(size=(128, 128)),
        misc.imresize(data.astronaut(), (128, 128))
    ]

    keypoints = [
        ia.KeypointsOnImage([
            ia.Keypoint(x=50, y=40),
            ia.Keypoint(x=70, y=38),
            ia.Keypoint(x=62, y=52)
        ],
                            shape=images[0].shape),
        ia.KeypointsOnImage([
            ia.Keypoint(x=55, y=32),
            ia.Keypoint(x=42, y=95),
            ia.Keypoint(x=75, y=89)
        ],
                            shape=images[1].shape)
    ]

    bounding_boxes = [
        ia.BoundingBoxesOnImage([
            ia.BoundingBox(x1=10, y1=10, x2=20, y2=20),
            ia.BoundingBox(x1=40, y1=50, x2=70, y2=60)
        ],
                                shape=images[0].shape),
        ia.BoundingBoxesOnImage([
            ia.BoundingBox(x1=10, y1=10, x2=20, y2=20),
            ia.BoundingBox(x1=40, y1=50, x2=70, y2=60)
        ],
                                shape=images[1].shape)
    ]

    # missing: InColorspace, Lambda, AssertLambda, AssertShape, Convolve
    augmenters = [
        iaa.Sequential([
            iaa.CoarseDropout(p=0.5, size_percent=0.05),
            iaa.AdditiveGaussianNoise(scale=0.1 * 255),
            iaa.Crop(percent=0.1)
        ],
                       name="Sequential"),
        iaa.SomeOf(2,
                   children=[
                       iaa.CoarseDropout(p=0.5, size_percent=0.05),
                       iaa.AdditiveGaussianNoise(scale=0.1 * 255),
                       iaa.Crop(percent=0.1)
                   ],
                   name="SomeOf"),
        iaa.OneOf(children=[
            iaa.CoarseDropout(p=0.5, size_percent=0.05),
            iaa.AdditiveGaussianNoise(scale=0.1 * 255),
            iaa.Crop(percent=0.1)
        ],
                  name="OneOf"),
        iaa.Sometimes(0.5,
                      iaa.AdditiveGaussianNoise(scale=0.1 * 255),
                      name="Sometimes"),
        iaa.WithColorspace("HSV",
                           children=[iaa.Add(20)],
                           name="WithColorspace"),
        iaa.WithChannels([0], children=[iaa.Add(20)], name="WithChannels"),
        iaa.AddToHueAndSaturation((-20, 20),
                                  per_channel=True,
                                  name="AddToHueAndSaturation"),
        iaa.Noop(name="Noop"),
        iaa.Scale({
            "width": 64,
            "height": 64
        }, name="Scale"),
        iaa.CropAndPad(px=(-8, 8), name="CropAndPad-px"),
        iaa.Pad(px=(0, 8), name="Pad-px"),
        iaa.Crop(px=(0, 8), name="Crop-px"),
        iaa.Crop(percent=(0, 0.1), name="Crop-percent"),
        iaa.Fliplr(0.5, name="Fliplr"),
        iaa.Flipud(0.5, name="Flipud"),
        iaa.Superpixels(p_replace=0.75, n_segments=50, name="Superpixels"),
        iaa.Grayscale(0.5, name="Grayscale0.5"),
        iaa.Grayscale(1.0, name="Grayscale1.0"),
        iaa.GaussianBlur((0, 3.0), name="GaussianBlur"),
        iaa.AverageBlur(k=(3, 11), name="AverageBlur"),
        iaa.MedianBlur(k=(3, 11), name="MedianBlur"),
        iaa.BilateralBlur(d=10, name="BilateralBlur"),
        iaa.Sharpen(alpha=(0.1, 1.0), lightness=(0, 2.0), name="Sharpen"),
        iaa.Emboss(alpha=(0.1, 1.0), strength=(0, 2.0), name="Emboss"),
        iaa.EdgeDetect(alpha=(0.1, 1.0), name="EdgeDetect"),
        iaa.DirectedEdgeDetect(alpha=(0.1, 1.0),
                               direction=(0, 1.0),
                               name="DirectedEdgeDetect"),
        iaa.Add((-50, 50), name="Add"),
        iaa.Add((-50, 50), per_channel=True, name="AddPerChannel"),
        iaa.AddElementwise((-50, 50), name="AddElementwise"),
        iaa.AdditiveGaussianNoise(loc=0,
                                  scale=(0.0, 0.1 * 255),
                                  name="AdditiveGaussianNoise"),
        iaa.Multiply((0.5, 1.5), name="Multiply"),
        iaa.Multiply((0.5, 1.5), per_channel=True, name="MultiplyPerChannel"),
        iaa.MultiplyElementwise((0.5, 1.5), name="MultiplyElementwise"),
        iaa.Dropout((0.0, 0.1), name="Dropout"),
        iaa.CoarseDropout(p=0.05,
                          size_percent=(0.05, 0.5),
                          name="CoarseDropout"),
        iaa.Invert(p=0.5, name="Invert"),
        iaa.Invert(p=0.5, per_channel=True, name="InvertPerChannel"),
        iaa.ContrastNormalization(alpha=(0.5, 2.0),
                                  name="ContrastNormalization"),
        iaa.SaltAndPepper(p=0.05, name="SaltAndPepper"),
        iaa.Salt(p=0.05, name="Salt"),
        iaa.Pepper(p=0.05, name="Pepper"),
        iaa.CoarseSaltAndPepper(p=0.05,
                                size_percent=(0.01, 0.1),
                                name="CoarseSaltAndPepper"),
        iaa.CoarseSalt(p=0.05, size_percent=(0.01, 0.1), name="CoarseSalt"),
        iaa.CoarsePepper(p=0.05, size_percent=(0.01, 0.1),
                         name="CoarsePepper"),
        iaa.Affine(scale={
            "x": (0.8, 1.2),
            "y": (0.8, 1.2)
        },
                   translate_px={
                       "x": (-16, 16),
                       "y": (-16, 16)
                   },
                   rotate=(-45, 45),
                   shear=(-16, 16),
                   order=ia.ALL,
                   cval=(0, 255),
                   mode=ia.ALL,
                   name="Affine"),
        iaa.PiecewiseAffine(scale=0.03,
                            nb_rows=(2, 6),
                            nb_cols=(2, 6),
                            name="PiecewiseAffine"),
        iaa.PerspectiveTransform(scale=0.1, name="PerspectiveTransform"),
        iaa.ElasticTransformation(alpha=(0.5, 8.0),
                                  sigma=1.0,
                                  name="ElasticTransformation"),
        iaa.Alpha(factor=(0.0, 1.0),
                  first=iaa.Add(100),
                  second=iaa.Dropout(0.5),
                  per_channel=False,
                  name="Alpha"),
        iaa.Alpha(factor=(0.0, 1.0),
                  first=iaa.Add(100),
                  second=iaa.Dropout(0.5),
                  per_channel=True,
                  name="AlphaPerChannel"),
        iaa.Alpha(factor=(0.0, 1.0),
                  first=iaa.Affine(rotate=(-45, 45)),
                  per_channel=True,
                  name="AlphaAffine"),
        iaa.AlphaElementwise(factor=(0.0, 1.0),
                             first=iaa.Add(50),
                             second=iaa.ContrastNormalization(2.0),
                             per_channel=False,
                             name="AlphaElementwise"),
        iaa.AlphaElementwise(factor=(0.0, 1.0),
                             first=iaa.Add(50),
                             second=iaa.ContrastNormalization(2.0),
                             per_channel=True,
                             name="AlphaElementwisePerChannel"),
        iaa.AlphaElementwise(factor=(0.0, 1.0),
                             first=iaa.Affine(rotate=(-45, 45)),
                             per_channel=True,
                             name="AlphaElementwiseAffine"),
        iaa.SimplexNoiseAlpha(
            #first=iaa.GaussianBlur((1.0, 3.0)),
            #first=iaa.MedianBlur((3, 7)),
            first=iaa.EdgeDetect(1.0),
            #first=iaa.Affine(rotate=-45), #(-45, 45)),
            per_channel=False,
            name="SimplexNoiseAlpha"),
        iaa.FrequencyNoiseAlpha(
            #first=iaa.GaussianBlur((1.0, 3.0)),
            #first=iaa.MedianBlur((3, 7)),
            first=iaa.EdgeDetect(1.0),
            #first=iaa.Affine(rotate=-45), #(-45, 45)),
            per_channel=False,
            name="FrequencyNoiseAlpha")
    ]

    augmenters.append(
        iaa.Sequential([iaa.Sometimes(0.2, aug.copy()) for aug in augmenters],
                       name="Sequential"))
    augmenters.append(
        iaa.Sometimes(0.5, [aug.copy() for aug in augmenters],
                      name="Sometimes"))

    for augmenter in augmenters:
        if args.only is None or augmenter.name in [
                v.strip() for v in args.only.split(",")
        ]:
            print("Augmenter: %s" % (augmenter.name, ))
            grid = []
            for image, kps, bbs in zip(images, keypoints, bounding_boxes):
                aug_det = augmenter.to_deterministic()
                imgs_aug = aug_det.augment_images(
                    np.tile(image[np.newaxis, ...], (16, 1, 1, 1)))
                kps_aug = aug_det.augment_keypoints([kps] * 16)
                bbs_aug = aug_det.augment_bounding_boxes([bbs] * 16)
                imgs_aug_drawn = [
                    kps_aug_one.draw_on_image(img_aug)
                    for img_aug, kps_aug_one in zip(imgs_aug, kps_aug)
                ]
                imgs_aug_drawn = [
                    bbs_aug_one.draw_on_image(img_aug)
                    for img_aug, bbs_aug_one in zip(imgs_aug_drawn, bbs_aug)
                ]
                grid.append(np.hstack(imgs_aug_drawn))
            misc.imshow(np.vstack(grid))
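
# A hypothetical invocation (the script filename is assumed), showing the
# comma-separated --only filter parsed above:
#
#     python check_augmenters_visually.py --only "GaussianBlur,Sharpen"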
Example #21
    def __init__(self, images,
                       config,
                       shuffle=True,
                       jitter=True,
                       norm=None):
        self.generator = None

        self.images = images
        self.config = config

        self.shuffle = shuffle
        self.jitter  = jitter
        self.norm    = norm
        self.image_counter = 0

        ia.seed( 1 )


        sometimes = lambda aug: iaa.Sometimes(0.8, aug)

        # Here the augmentation is defined but not executed.
        # Define our sequence of augmentation steps that will be applied to every image
        # All augmenters with per_channel=0.5 will sample one value _per image_
        # in 50% of all cases. In all other cases they will sample new values
        # _per channel_.
        self.aug_pipe = iaa.Sequential(
            [
                sometimes( iaa.Affine(
                    #scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis
                    translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)}, # translate by -10 to +10 percent (per axis)
                    rotate=(-10, 10), # rotate by -10 to +10 degrees
                    #shear=(-5, 5), # shear by -16 to +16 degrees
                    #order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
                    #cval=(0, 255), # if mode is constant, use a cval between 0 and 255
                    #mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples)
                    mode = "edge"

                )),
                iaa.SomeOf((0, 5),
                    [
                        #sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))), # convert images into their superpixel representation
                        iaa.OneOf([
                            iaa.GaussianBlur((0, 3.0)), # blur images with a sigma between 0 and 3.0
                            iaa.AverageBlur(k=(2, 7)), # blur image using local means with kernel sizes between 2 and 7
                            iaa.MedianBlur(k=(3, 11)), # blur image using local medians with kernel sizes between 3 and 11
                        ]),
                        #    iaa.CoarseSalt(0.01, size_percent=(0.002, 0.01)),
                        iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)), # sharpen images
                        #iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)), # emboss images
                        # search either for all edges or for directed edges
                        #sometimes(iaa.OneOf([
                        #    iaa.EdgeDetect(alpha=(0, 0.7)),
                        #    iaa.DirectedEdgeDetect(alpha=(0, 0.7), direction=(0.0, 1.0)),
                        #])),
                        iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.1*255), per_channel=0), # add gaussian noise to images
                        iaa.OneOf([
                            iaa.Dropout((0.01, 0.1), per_channel=0), # randomly remove up to 10% of the pixels
                            #iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2),
                        ]),
                        #iaa.Invert(0.05, per_channel=True), # invert color channels
                        iaa.Add((-15, 15), per_channel=0), # change brightness of images (by -15 to +15 of original value)
                        iaa.Multiply((0.5, 1.5), per_channel=0), # change brightness of images (50-150% of original value)
                        iaa.ContrastNormalization((0.5, 2.0), per_channel=0), # improve or worsen the contrast
                        #iaa.Grayscale(alpha=(0.0, 1.0)),
                        #sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)), # move pixels locally around (with random strengths)
                        #sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))) # sometimes move parts of the image around
                    ])

            ],
            random_order=True
        )

        if shuffle: np.random.shuffle(self.images)
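
        # A minimal sketch (assumed, not part of the original generator) of how
        # aug_pipe is typically applied to a single image when jitter is enabled:
        #
        #     if self.jitter:
        #         img = self.aug_pipe.augment_image(img)  # img: HxWxC uint8 array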
Example #22
def test_keypoint_augmentation():
    reseed()

    keypoints = []
    for y in range(40//5):
        for x in range(60//5):
            keypoints.append(ia.Keypoint(y=y*5, x=x*5))

    keypoints_oi = ia.KeypointsOnImage(keypoints, shape=(40, 60, 3))
    keypoints_oi_empty = ia.KeypointsOnImage([], shape=(40, 60, 3))

    augs = [
        iaa.Add((-5, 5), name="Add"),
        iaa.AddElementwise((-5, 5), name="AddElementwise"),
        iaa.AdditiveGaussianNoise(0.01*255, name="AdditiveGaussianNoise"),
        iaa.Multiply((0.95, 1.05), name="Multiply"),
        iaa.Dropout(0.01, name="Dropout"),
        iaa.CoarseDropout(0.01, size_px=6, name="CoarseDropout"),
        iaa.Invert(0.01, per_channel=True, name="Invert"),
        iaa.ContrastNormalization((0.95, 1.05), name="ContrastNormalization"),
        iaa.GaussianBlur(sigma=(0.95, 1.05), name="GaussianBlur"),
        iaa.AverageBlur((3, 5), name="AverageBlur"),
        iaa.MedianBlur((3, 5), name="MedianBlur"),
        # iaa.BilateralBlur((3, 5), name="BilateralBlur"),
        # WithColorspace ?
        # iaa.AddToHueAndSaturation((-5, 5), name="AddToHueAndSaturation"),
        # ChangeColorspace ?
        # Grayscale cannot be tested, input not RGB
        # Convolve ?
        iaa.Sharpen((0.0, 0.1), lightness=(1.0, 1.2), name="Sharpen"),
        iaa.Emboss(alpha=(0.0, 0.1), strength=(0.5, 1.5), name="Emboss"),
        iaa.EdgeDetect(alpha=(0.0, 0.1), name="EdgeDetect"),
        iaa.DirectedEdgeDetect(alpha=(0.0, 0.1), direction=0, name="DirectedEdgeDetect"),
        iaa.Fliplr(0.5, name="Fliplr"),
        iaa.Flipud(0.5, name="Flipud"),
        iaa.Affine(translate_px=(-5, 5), name="Affine-translate-px"),
        iaa.Affine(translate_percent=(-0.05, 0.05), name="Affine-translate-percent"),
        iaa.Affine(rotate=(-20, 20), name="Affine-rotate"),
        iaa.Affine(shear=(-20, 20), name="Affine-shear"),
        iaa.Affine(scale=(0.9, 1.1), name="Affine-scale"),
        iaa.PiecewiseAffine(scale=(0.001, 0.005), name="PiecewiseAffine"),
        # iaa.PerspectiveTransform(scale=(0.01, 0.10), name="PerspectiveTransform"),
        iaa.ElasticTransformation(alpha=(0.1, 0.2), sigma=(0.1, 0.2), name="ElasticTransformation"),
        # Sequential
        # SomeOf
        # OneOf
        # Sometimes
        # WithChannels
        # Noop
        # Lambda
        # AssertLambda
        # AssertShape
        iaa.Alpha((0.0, 0.1), iaa.Add(10), name="Alpha"),
        iaa.AlphaElementwise((0.0, 0.1), iaa.Add(10), name="AlphaElementwise"),
        iaa.SimplexNoiseAlpha(iaa.Add(10), name="SimplexNoiseAlpha"),
        iaa.FrequencyNoiseAlpha(exponent=(-2, 2), first=iaa.Add(10),
                                name="FrequencyNoiseAlpha"),
        iaa.Superpixels(p_replace=0.01, n_segments=64),
        iaa.Scale(0.5, name="Scale"),
        iaa.CropAndPad(px=(-10, 10), name="CropAndPad"),
        iaa.Pad(px=(0, 10), name="Pad"),
        iaa.Crop(px=(0, 10), name="Crop")
    ]

    for aug in augs:
        dss = []
        for i in range(10):
            aug_det = aug.to_deterministic()

            kp_fully_empty_aug = aug_det.augment_keypoints([])
            assert kp_fully_empty_aug == []

            kp_first_empty_aug = aug_det.augment_keypoints([keypoints_oi_empty])[0]
            assert len(kp_first_empty_aug.keypoints) == 0

            kp_image = keypoints_oi.to_keypoint_image(size=5)
            kp_image_aug = aug_det.augment_image(kp_image)
            kp_image_aug_rev = ia.KeypointsOnImage.from_keypoint_image(
                kp_image_aug,
                if_not_found_coords={"x": -9999, "y": -9999},
                nb_channels=1
            )
            kp_aug = aug_det.augment_keypoints([keypoints_oi])[0]
            ds = []
            assert len(kp_image_aug_rev.keypoints) == len(kp_aug.keypoints),\
                "Lost keypoints for '%s' (%d vs expected %d)" \
                % (aug.name, len(kp_aug.keypoints), len(kp_image_aug_rev.keypoints))
            for kp_pred, kp_pred_img in zip(kp_aug.keypoints, kp_image_aug_rev.keypoints):
                kp_pred_lost = (kp_pred.x == -9999 and kp_pred.y == -9999)
                kp_pred_img_lost = (kp_pred_img.x == -9999 and kp_pred_img.y == -9999)

                if not kp_pred_lost and not kp_pred_img_lost:
                    d = np.sqrt((kp_pred.x - kp_pred_img.x) ** 2
                                + (kp_pred.y - kp_pred_img.y) ** 2)
                    ds.append(d)
            dss.extend(ds)
            if len(ds) == 0:
                print("[INFO] No valid keypoints found for '%s' "
                      "in test_keypoint_augmentation()" % (str(aug),))
        assert np.average(dss) < 5.0, \
            "Average distance too high (%.2f, with ds: %s)" \
            % (np.average(dss), str(dss))
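
# The pattern this test exercises, in isolation (a minimal sketch, not part of
# the test file): a deterministic copy applies identical transforms to an image
# and to its keypoints.
import numpy as np
import imgaug as ia
from imgaug import augmenters as iaa

image = np.zeros((40, 60, 3), dtype=np.uint8)
kps = ia.KeypointsOnImage([ia.Keypoint(x=10, y=10)], shape=image.shape)
aug_det = iaa.Affine(translate_px={"x": 5}).to_deterministic()
image_aug = aug_det.augment_image(image)
kps_aug = aug_det.augment_keypoints([kps])[0]  # the keypoint shifts by the same +5 px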
Example #23
def train():
    BATCH_SIZE = 100

    network = Network()

    timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H%M%S")

    # create directory for saving models
    os.makedirs(os.path.join('save', network.description, timestamp))

    dataset = Dataset(folder='data{}_{}'.format(network.IMAGE_HEIGHT,
                                                network.IMAGE_WIDTH),
                      batch_size=BATCH_SIZE)

    inputs, targets = dataset.next_batch()
    print(inputs.shape, targets.shape)

    # augmentation_seq = iaa.Sequential([
    #     iaa.Crop(px=(0, 16)),  # crop images from each side by 0 to 16px (randomly chosen)
    #     iaa.Fliplr(0.5),  # horizontally flip 50% of the images
    #     iaa.GaussianBlur(sigma=(0, 2.0))  # blur images with a sigma of 0 to 3.0
    # ])

    augmentation_seq = iaa.Sequential([
        iaa.Crop(
            px=(0, 16), name="Cropper"
        ),  # crop images from each side by 0 to 16px (randomly chosen)
        iaa.Fliplr(0.5, name="Flipper"),
        iaa.GaussianBlur((0, 3.0), name="GaussianBlur"),
        iaa.Dropout(0.02, name="Dropout"),
        iaa.AdditiveGaussianNoise(scale=0.01 * 255, name="GaussianNoise"),
        iaa.Affine(translate_px={
            "x": (-network.IMAGE_HEIGHT // 3, network.IMAGE_WIDTH // 3)
        },
                   name="Affine")
    ])

    # Change which augmenters are applied to the binary masks:
    # we only want to execute the crop, flip and affine transformations.
    def activator_binmasks(images, augmenter, parents, default):
        if augmenter.name in ["GaussianBlur", "Dropout", "GaussianNoise"]:
            return False
        else:
            # default value for all other augmenters
            return default

    hooks_binmasks = imgaug.HooksImages(activator=activator_binmasks)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        summary_writer = tf.summary.FileWriter('{}/{}-{}'.format(
            'logs', network.description, timestamp),
                                               graph=tf.get_default_graph())
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)

        test_accuracies = []
        # Fit all training data
        n_epochs = 500
        global_start = time.time()
        for epoch_i in range(n_epochs):
            dataset.reset_batch_pointer()

            for batch_i in range(dataset.num_batches_in_epoch()):
                batch_num = epoch_i * dataset.num_batches_in_epoch() + batch_i + 1

                augmentation_seq_deterministic = augmentation_seq.to_deterministic()

                start = time.time()
                batch_inputs, batch_targets = dataset.next_batch()
                batch_inputs = np.reshape(
                    batch_inputs, (dataset.batch_size, network.IMAGE_HEIGHT,
                                   network.IMAGE_WIDTH, 1))
                batch_targets = np.reshape(
                    batch_targets, (dataset.batch_size, network.IMAGE_HEIGHT,
                                    network.IMAGE_WIDTH, 1))

                batch_inputs = augmentation_seq_deterministic.augment_images(
                    batch_inputs)
                batch_inputs = np.multiply(batch_inputs, 1.0 / 255)

                batch_targets = augmentation_seq_deterministic.augment_images(
                    batch_targets, hooks=hooks_binmasks)

                cost, _ = sess.run(
                    [network.cost, network.train_op],
                    feed_dict={
                        network.inputs: batch_inputs,
                        network.targets: batch_targets,
                        network.is_training: True
                    })
                end = time.time()
                print('{}/{}, epoch: {}, cost: {}, batch time: {}'.format(
                    batch_num, n_epochs * dataset.num_batches_in_epoch(),
                    epoch_i, cost, end - start))

                if batch_num % 100 == 0 or batch_num == n_epochs * dataset.num_batches_in_epoch():
                    test_inputs, test_targets = dataset.test_set
                    # test_inputs, test_targets = test_inputs[:100], test_targets[:100]

                    test_inputs = np.reshape(
                        test_inputs,
                        (-1, network.IMAGE_HEIGHT, network.IMAGE_WIDTH, 1))
                    test_targets = np.reshape(
                        test_targets,
                        (-1, network.IMAGE_HEIGHT, network.IMAGE_WIDTH, 1))
                    test_inputs = np.multiply(test_inputs, 1.0 / 255)

                    print(test_inputs.shape)
                    summary, test_accuracy = sess.run(
                        [network.summaries, network.accuracy],
                        feed_dict={
                            network.inputs: test_inputs,
                            network.targets: test_targets,
                            network.is_training: False
                        })

                    summary_writer.add_summary(summary, batch_num)

                    print('Step {}, test accuracy: {}'.format(
                        batch_num, test_accuracy))
                    test_accuracies.append((test_accuracy, batch_num))
                    print("Accuracies in time: ", [
                        test_accuracies[x][0]
                        for x in range(len(test_accuracies))
                    ])
                    max_acc = max(test_accuracies)
                    print("Best accuracy: {} in batch {}".format(
                        max_acc[0], max_acc[1]))
                    print("Total time: {}".format(time.time() - global_start))

                    # Plot example reconstructions
                    n_examples = 12
                    test_inputs = dataset.test_inputs[:n_examples]
                    test_targets = dataset.test_targets[:n_examples]
                    test_inputs = np.multiply(test_inputs, 1.0 / 255)

                    test_segmentation = sess.run(
                        network.segmentation_result,
                        feed_dict={
                            network.inputs:
                            np.reshape(test_inputs, [
                                n_examples, network.IMAGE_HEIGHT,
                                network.IMAGE_WIDTH, 1
                            ])
                        })

                    # Prepare the plot
                    test_plot_buf = draw_results(test_inputs, test_targets,
                                                 test_segmentation,
                                                 test_accuracy, network,
                                                 batch_num)

                    # Convert PNG buffer to TF image
                    image = tf.image.decode_png(test_plot_buf.getvalue(),
                                                channels=4)

                    # Add the batch dimension
                    image = tf.expand_dims(image, 0)

                    # Add image summary
                    image_summary_op = tf.summary.image("plot", image)

                    image_summary = sess.run(image_summary_op)
                    summary_writer.add_summary(image_summary)

                    if test_accuracy >= max_acc[0]:
                        checkpoint_path = os.path.join('save',
                                                       network.description,
                                                       timestamp, 'model.ckpt')
                        saver.save(sess,
                                   checkpoint_path,
                                   global_step=batch_num)
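
# The hooks pattern used above, in isolation (a minimal sketch with the
# variable names from train()): the same deterministic sequence augments
# images and masks, while the activator disables the pixel-level augmenters
# for the masks.
#
#     seq_det = augmentation_seq.to_deterministic()
#     imgs_aug = seq_det.augment_images(batch_inputs)
#     masks_aug = seq_det.augment_images(batch_targets, hooks=hooks_binmasks)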
Example #24
def test_unusual_channel_numbers():
    reseed()

    images = [
        (0, create_random_images((4, 16, 16))),
        (1, create_random_images((4, 16, 16, 1))),
        (2, create_random_images((4, 16, 16, 2))),
        (4, create_random_images((4, 16, 16, 4))),
        (5, create_random_images((4, 16, 16, 5))),
        (10, create_random_images((4, 16, 16, 10))),
        (20, create_random_images((4, 16, 16, 20)))
    ]

    augs = [
        iaa.Add((-5, 5), name="Add"),
        iaa.AddElementwise((-5, 5), name="AddElementwise"),
        iaa.AdditiveGaussianNoise(0.01*255, name="AdditiveGaussianNoise"),
        iaa.Multiply((0.95, 1.05), name="Multiply"),
        iaa.Dropout(0.01, name="Dropout"),
        iaa.CoarseDropout(0.01, size_px=6, name="CoarseDropout"),
        iaa.Invert(0.01, per_channel=True, name="Invert"),
        iaa.ContrastNormalization((0.95, 1.05), name="ContrastNormalization"),
        iaa.GaussianBlur(sigma=(0.95, 1.05), name="GaussianBlur"),
        iaa.AverageBlur((3, 5), name="AverageBlur"),
        iaa.MedianBlur((3, 5), name="MedianBlur"),
        # iaa.BilateralBlur((3, 5), name="BilateralBlur"), # works only with 3/RGB channels
        # WithColorspace ?
        # iaa.AddToHueAndSaturation((-5, 5), name="AddToHueAndSaturation"), # works only with 3/RGB channels
        # ChangeColorspace ?
        # iaa.Grayscale((0.0, 0.1), name="Grayscale"), # works only with 3 channels
        # Convolve ?
        iaa.Sharpen((0.0, 0.1), lightness=(1.0, 1.2), name="Sharpen"),
        iaa.Emboss(alpha=(0.0, 0.1), strength=(0.5, 1.5), name="Emboss"),
        iaa.EdgeDetect(alpha=(0.0, 0.1), name="EdgeDetect"),
        iaa.DirectedEdgeDetect(alpha=(0.0, 0.1), direction=0,
                               name="DirectedEdgeDetect"),
        iaa.Fliplr(0.5, name="Fliplr"),
        iaa.Flipud(0.5, name="Flipud"),
        iaa.Affine(translate_px=(-5, 5), name="Affine-translate-px"),
        iaa.Affine(translate_percent=(-0.05, 0.05), name="Affine-translate-percent"),
        iaa.Affine(rotate=(-20, 20), name="Affine-rotate"),
        iaa.Affine(shear=(-20, 20), name="Affine-shear"),
        iaa.Affine(scale=(0.9, 1.1), name="Affine-scale"),
        iaa.PiecewiseAffine(scale=(0.001, 0.005), name="PiecewiseAffine"),
        iaa.PerspectiveTransform(scale=(0.01, 0.10), name="PerspectiveTransform"),
        iaa.ElasticTransformation(alpha=(0.1, 0.2), sigma=(0.1, 0.2),
                                  name="ElasticTransformation"),
        iaa.Sequential([iaa.Add((-5, 5)), iaa.AddElementwise((-5, 5))]),
        iaa.SomeOf(1, [iaa.Add((-5, 5)), iaa.AddElementwise((-5, 5))]),
        iaa.OneOf([iaa.Add((-5, 5)), iaa.AddElementwise((-5, 5))]),
        iaa.Sometimes(0.5, iaa.Add((-5, 5)), name="Sometimes"),
        # WithChannels
        iaa.Noop(name="Noop"),
        # Lambda
        # AssertLambda
        # AssertShape
        iaa.Alpha((0.0, 0.1), iaa.Add(10), name="Alpha"),
        iaa.AlphaElementwise((0.0, 0.1), iaa.Add(10), name="AlphaElementwise"),
        iaa.SimplexNoiseAlpha(iaa.Add(10), name="SimplexNoiseAlpha"),
        iaa.FrequencyNoiseAlpha(exponent=(-2, 2), first=iaa.Add(10),
                                name="FrequencyNoiseAlpha"),
        iaa.Superpixels(p_replace=0.01, n_segments=64),
        iaa.Scale({"height": 4, "width": 4}, name="Scale"),
        iaa.CropAndPad(px=(-10, 10), name="CropAndPad"),
        iaa.Pad(px=(0, 10), name="Pad"),
        iaa.Crop(px=(0, 10), name="Crop")
    ]

    for aug in augs:
        for (nb_channels, images_c) in images:
            if aug.name != "Scale":
                images_aug = aug.augment_images(images_c)
                assert images_aug.shape == images_c.shape
                image_aug = aug.augment_image(images_c[0])
                assert image_aug.shape == images_c[0].shape
            else:
                images_aug = aug.augment_images(images_c)
                image_aug = aug.augment_image(images_c[0])
                if images_c.ndim == 3:
                    assert images_aug.shape == (4, 4, 4)
                    assert image_aug.shape == (4, 4)
                else:
                    assert images_aug.shape == (4, 4, 4, images_c.shape[3])
                    assert image_aug.shape == (4, 4, images_c.shape[3])
Example #25
CUSTOM_FONTS = subprocess.run(
    "fc-list | sed -r -e 's/^(.+): .*$/\\1/g' | grep custom",
    stdout=subprocess.PIPE,
    shell=True).stdout.decode("utf-8").strip().split("\n")

FONTNAMES += CUSTOM_FONTS

with open("/usr/share/dict/words") as f:
    WORDS = f.read().splitlines()

AUGMENTOR = iaa.Sequential([
    iaa.OneOf([iaa.GaussianBlur(
        (0, 1.0)), iaa.AverageBlur(k=(1, 2))]),
    iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),
    iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)),
    iaa.Dropout((0.01, 0.03), per_channel=0.1),
    iaa.Add((-10, 10), per_channel=0.5),
    iaa.OneOf([
        iaa.Multiply((0.5, 1.5), per_channel=0.5),
        iaa.FrequencyNoiseAlpha(exponent=(-4, 0),
                                first=iaa.Multiply(
                                    (0.5, 1.5), per_channel=True),
                                second=iaa.ContrastNormalization((0.5, 2.0)))
    ]),
],
                           random_order=True)
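
# A minimal usage sketch (assumed, not part of the original script): AUGMENTOR
# is applied to rendered uint8 text-line images, e.g.
#
#     line_img = render_line(random.choice(WORDS))  # render_line is hypothetical
#     line_img_aug = AUGMENTOR.augment_image(line_img)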


class LineState(Enum):
    START = 0
    HEADLINE = 1
Example #26
def test_determinism():
    reseed()

    images = [
        ia.quokka(size=(128, 128)),
        ia.quokka(size=(64, 64)),
        ia.imresize_single_image(skimage.data.astronaut(), (128, 256))
    ]
    keypoints = [
        ia.KeypointsOnImage([
            ia.Keypoint(x=20, y=10), ia.Keypoint(x=5, y=5), ia.Keypoint(x=10, y=43)
            ], shape=(50, 60, 3))
    ]

    augs = [
        iaa.Sequential([iaa.Fliplr(1.0), iaa.Flipud(1.0)]),
        iaa.SomeOf(1, [iaa.Fliplr(1.0), iaa.Flipud(1.0)]),
        iaa.OneOf([iaa.Fliplr(1.0), iaa.Flipud(1.0)]),
        iaa.Sometimes(1.0, iaa.Fliplr(1)),
        iaa.WithColorspace("HSV", children=iaa.Add((-50, 50))),
        iaa.WithChannels([0], iaa.Add((-50, 50))),
        iaa.Noop(name="Noop-nochange"),
        iaa.Lambda(
            func_images=lambda images, random_state, parents, hooks: images,
            func_heatmaps=lambda heatmaps, random_state, parents, hooks: heatmaps,
            func_keypoints=lambda keypoints_on_images, random_state, parents, hooks: keypoints_on_images,
            name="Lambda-nochange"
        ),
        iaa.AssertLambda(
            func_images=lambda images, random_state, parents, hooks: True,
            func_heatmaps=lambda heatmaps, random_state, parents, hooks: True,
            func_keypoints=lambda keypoints_on_images, random_state, parents, hooks: True,
            name="AssertLambda-nochange"
        ),
        iaa.AssertShape(
            (None, None, None, 3),
            check_keypoints=False,
            name="AssertShape-nochange"
        ),
        iaa.Scale((0.5, 0.9)),
        iaa.CropAndPad(px=(-50, 50)),
        iaa.Pad(px=(1, 50)),
        iaa.Crop(px=(1, 50)),
        iaa.Fliplr(1.0),
        iaa.Flipud(1.0),
        iaa.Superpixels(p_replace=(0.25, 1.0), n_segments=(16, 128)),
        iaa.ChangeColorspace(to_colorspace="GRAY"),
        iaa.Grayscale(alpha=(0.1, 1.0)),
        iaa.GaussianBlur(1.0),
        iaa.AverageBlur(5),
        iaa.MedianBlur(5),
        iaa.Convolve(np.array([[0, 1, 0],
                               [1, -4, 1],
                               [0, 1, 0]])),
        iaa.Sharpen(alpha=(0.1, 1.0), lightness=(0.8, 1.2)),
        iaa.Emboss(alpha=(0.1, 1.0), strength=(0.8, 1.2)),
        iaa.EdgeDetect(alpha=(0.1, 1.0)),
        iaa.DirectedEdgeDetect(alpha=(0.1, 1.0), direction=(0.0, 1.0)),
        iaa.Add((-50, 50)),
        iaa.AddElementwise((-50, 50)),
        iaa.AdditiveGaussianNoise(scale=(0.1, 1.0)),
        iaa.Multiply((0.6, 1.4)),
        iaa.MultiplyElementwise((0.6, 1.4)),
        iaa.Dropout((0.3, 0.5)),
        iaa.CoarseDropout((0.3, 0.5), size_percent=(0.05, 0.2)),
        iaa.Invert(0.5),
        iaa.ContrastNormalization((0.6, 1.4)),
        iaa.Affine(scale=(0.7, 1.3), translate_percent=(-0.1, 0.1),
                   rotate=(-20, 20), shear=(-20, 20), order=ia.ALL,
                   mode=ia.ALL, cval=(0, 255)),
        iaa.PiecewiseAffine(scale=(0.1, 0.3)),
        iaa.ElasticTransformation(alpha=0.5)
    ]

    for aug in augs:
        aug_det = aug.to_deterministic()
        images_aug1 = aug_det.augment_images(images)
        images_aug2 = aug_det.augment_images(images)
        kps_aug1 = aug_det.augment_keypoints(keypoints)
        kps_aug2 = aug_det.augment_keypoints(keypoints)
        assert array_equal_lists(images_aug1, images_aug2), \
            "Images not identical for %s" % (aug.name,)
        assert keypoints_equal(kps_aug1, kps_aug2), \
            "Keypoints not identical for %s" % (aug.name,)
Example #27
 def __init__(self, image):
     self.img = image
     # Randomly process selected channels, adding/subtracting up to 100
     # self.aug_WithChannels = iaa.WithChannels((0,2), iaa.Add((-100, 100)))
     # Random crop and pad; percent is the crop/pad ratio (negative values crop, positive values pad), pad_mode is the padding mode, pad_cval the pixel value used for constant padding
     self.aug_CropAndPad = iaa.CropAndPad(percent=(-0.05, 0.1),
                                          pad_mode=ia.ALL,
                                          pad_cval=(0, 255))
     # Random horizontal flip; the argument is the probability
     self.aug_Fliplr = iaa.Fliplr(0.5)
     # Random vertical flip; the argument is the probability
     self.aug_Flipud = iaa.Flipud(0.5)
     # Superpixel representation; p_replace is the fraction of pixels replaced by their superpixel value, n_segments the number of segments
     self.aug_Superpixels = iaa.Superpixels(p_replace=(0, 1.0),
                                            n_segments=(20, 200))
     # Random grayscale blending; alpha close to 0 keeps the colors, close to 1 is mostly grayscale
     self.aug_GrayScale = iaa.Grayscale(alpha=(0.0, 0.6))
     # Gaussian blur
     self.aug_GaussianBlur = iaa.GaussianBlur(sigma=(0, 3.0))
     # Average blur; k is the kernel size
     self.aug_AverageBlur = iaa.AverageBlur(k=(2, 7))
     # Median blur; k is the kernel size
     self.aug_MedianBlur = iaa.MedianBlur(k=(3, 11))
     # Bilateral filter; d is the kernel size, sigma_color the color-space sigma, sigma_space the coordinate-space sigma
     self.aug_BilateralBlur = iaa.BilateralBlur(sigma_color=(0, 250),
                                                sigma_space=(0, 250),
                                                d=(3, 7))
     # Sharpen
     self.aug_Sharpen = iaa.Sharpen(alpha=(0.0, 1.0), lightness=(0.75, 2.0))
     # Emboss effect
     self.aug_Emboss = iaa.Emboss(alpha=(0.0, 1.0), strength=(0.0, 1.5))
     # Edge detection
     self.aug_EdgeDetect = iaa.EdgeDetect(alpha=(0.0, 1.0))
     # Directed edge detection
     self.aug_DirectedEdgeDetect = iaa.DirectedEdgeDetect(alpha=(0.0, 1.0),
                                                          direction=(0.0, 1.0))
     # Add the same value to every pixel
     self.aug_Add = iaa.Add((-40, 40))
     # Add a different value to each pixel
     self.aug_AddElementwise = iaa.AddElementwise((-40, 40))
     # Additive Gaussian noise
     self.aug_AdditiveGaussianNoise = iaa.AdditiveGaussianNoise(
         scale=(0.0, 0.1 * 255))
     # Multiply every pixel by the same value
     self.aug_Multiply = iaa.Multiply((0.8, 1.2))
     # Multiply each pixel by a different value
     self.aug_MultiplyElementwise = iaa.MultiplyElementwise((0.8, 1.2))
     # Randomly drop out pixels
     self.aug_Dropout = iaa.Dropout(p=(0, 0.2))
     # Coarse dropout: drop out coarse rectangular blocks of pixels
     self.aug_CoarseDropout = iaa.CoarseDropout(0.02, size_percent=0.5)
     # Invert colors with probability 0.25; per_channel=0.5 applies it per channel in half of those cases
     self.aug_Invert = iaa.Invert(0.25, per_channel=0.5)
     # Contrast normalization
     self.aug_ContrastNormalization = iaa.ContrastNormalization((0.5, 1.5))
     # Affine transformation
     self.aug_Affine = iaa.Affine(rotate=(0, 20),
                                  scale={
                                      "x": (0.8, 1.2),
                                      "y": (0.8, 1.2)
                                  })
     # Piecewise affine: distort the image with local affine transforms
     self.aug_PiecewiseAffine = iaa.PiecewiseAffine(scale=(0.01, 0.05))
     # Perspective (homography) transform
     self.aug_PerspectiveTransform = iaa.PerspectiveTransform(scale=(0.01, 0.1))
     # Elastic transformation
     self.aug_ElasticTransformation = iaa.ElasticTransformation(alpha=(0, 5.0),
                                                                sigma=0.25)
     # Alpha-blend with simplex noise masks (produces blotchy regions)
     self.aug_SimplexNoiseAlpha = iaa.SimplexNoiseAlpha(
         iaa.OneOf([
             iaa.EdgeDetect(alpha=(0.0, 0.5)),
             iaa.DirectedEdgeDetect(alpha=(0.0, 0.5), direction=(0.0, 1.0)),
         ]))
     # Alpha-blend using frequency-domain noise, which shows up as blocky color changes
     self.aug_FrequencyNoiseAlpha = iaa.FrequencyNoiseAlpha(
         exponent=(-4, 0),
         first=iaa.Multiply((0.5, 1.5), per_channel=True),
         second=iaa.ContrastNormalization((0.5, 2.0)))
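
     # A minimal sketch (assumed, not part of the original class) of applying
     # one of the augmenters defined above to the wrapped image:
     #
     #     img_aug = self.aug_GaussianBlur.augment_image(self.img)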
Example #28
def test_dtype_preservation():
    reseed()

    size = (4, 16, 16, 3)
    images = [
        np.random.uniform(0, 255, size).astype(np.uint8),
        np.random.uniform(0, 65535, size).astype(np.uint16),
        np.random.uniform(0, 4294967295, size).astype(np.uint32),
        np.random.uniform(-128, 127, size).astype(np.int16),
        np.random.uniform(-32768, 32767, size).astype(np.int32),
        np.random.uniform(0.0, 1.0, size).astype(np.float32),
        np.random.uniform(-1000.0, 1000.0, size).astype(np.float16),
        np.random.uniform(-1000.0, 1000.0, size).astype(np.float32),
        np.random.uniform(-1000.0, 1000.0, size).astype(np.float64)
    ]

    default_dtypes = set([arr.dtype for arr in images])
    # Some dtypes are here removed per augmenter, because the respective
    # augmenter does not support them. This test currently only checks whether
    # dtypes are preserved from in- to output for all dtypes that are supported
    # per augmenter.
    # dtypes are here removed via list comprehension instead of
    # `default_dtypes - set([dtype])`, because the latter one simply never
    # removed the dtype(s) for some reason?!

    def _not_dts(dts):
        return [dt for dt in default_dtypes if dt not in dts]

    augs = [
        (iaa.Add((-5, 5), name="Add"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.AddElementwise((-5, 5), name="AddElementwise"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.AdditiveGaussianNoise(0.01*255, name="AdditiveGaussianNoise"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.Multiply((0.95, 1.05), name="Multiply"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.Dropout(0.01, name="Dropout"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.CoarseDropout(0.01, size_px=6, name="CoarseDropout"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.Invert(0.01, per_channel=True, name="Invert"), default_dtypes),
        (iaa.ContrastNormalization((0.95, 1.05), name="ContrastNormalization"), default_dtypes),
        (iaa.GaussianBlur(sigma=(0.95, 1.05), name="GaussianBlur"), _not_dts([np.float16])),
        (iaa.AverageBlur((3, 5), name="AverageBlur"), _not_dts([np.uint32, np.int32, np.float16])),
        (iaa.MedianBlur((3, 5), name="MedianBlur"), _not_dts([np.uint32, np.int32, np.float16, np.float64])),
        (iaa.BilateralBlur((3, 5), name="BilateralBlur"),
         _not_dts([np.uint16, np.uint32, np.int16, np.int32, np.float16, np.float64])),
        # WithColorspace ?
        # iaa.AddToHueAndSaturation((-5, 5), name="AddToHueAndSaturation"), # works only with RGB/uint8
        # ChangeColorspace ?
        # iaa.Grayscale((0.0, 0.1), name="Grayscale"), # works only with RGB/uint8
        # Convolve ?
        (iaa.Sharpen((0.0, 0.1), lightness=(1.0, 1.2), name="Sharpen"),
         _not_dts([np.uint32, np.int32, np.float16, np.uint32])),
        (iaa.Emboss(alpha=(0.0, 0.1), strength=(0.5, 1.5), name="Emboss"),
         _not_dts([np.uint32, np.int32, np.float16, np.uint32])),
        (iaa.EdgeDetect(alpha=(0.0, 0.1), name="EdgeDetect"),
         _not_dts([np.uint32, np.int32, np.float16, np.uint32])),
        (iaa.DirectedEdgeDetect(alpha=(0.0, 0.1), direction=0, name="DirectedEdgeDetect"),
         _not_dts([np.uint32, np.int32, np.float16, np.uint32])),
        (iaa.Fliplr(0.5, name="Fliplr"), default_dtypes),
        (iaa.Flipud(0.5, name="Flipud"), default_dtypes),
        (iaa.Affine(translate_px=(-5, 5), name="Affine-translate-px"), default_dtypes),
        (iaa.Affine(translate_percent=(-0.05, 0.05), name="Affine-translate-percent"), default_dtypes),
        (iaa.Affine(rotate=(-20, 20), name="Affine-rotate"), default_dtypes),
        (iaa.Affine(shear=(-20, 20), name="Affine-shear"), default_dtypes),
        (iaa.Affine(scale=(0.9, 1.1), name="Affine-scale"), default_dtypes),
        (iaa.PiecewiseAffine(scale=(0.001, 0.005), name="PiecewiseAffine"), default_dtypes),
        # (iaa.PerspectiveTransform(scale=(0.01, 0.10), name="PerspectiveTransform"), not_dts([np.uint32])),
        (iaa.ElasticTransformation(alpha=(0.1, 0.2), sigma=(0.1, 0.2), name="ElasticTransformation"),
         _not_dts([np.float16])),
        (iaa.Sequential([iaa.Noop(), iaa.Noop()], name="SequentialNoop"), default_dtypes),
        (iaa.SomeOf(1, [iaa.Noop(), iaa.Noop()], name="SomeOfNoop"), default_dtypes),
        (iaa.OneOf([iaa.Noop(), iaa.Noop()], name="OneOfNoop"), default_dtypes),
        (iaa.Sometimes(0.5, iaa.Noop(), name="SometimesNoop"), default_dtypes),
        (iaa.Sequential([iaa.Add((-5, 5)), iaa.AddElementwise((-5, 5))], name="Sequential"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.SomeOf(1, [iaa.Add((-5, 5)), iaa.AddElementwise((-5, 5))], name="SomeOf"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.OneOf([iaa.Add((-5, 5)), iaa.AddElementwise((-5, 5))], name="OneOf"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.Sometimes(0.5, iaa.Add((-5, 5)), name="Sometimes"), _not_dts([np.uint32, np.int32, np.float64])),
        # WithChannels
        (iaa.Noop(name="Noop"), default_dtypes),
        # Lambda
        # AssertLambda
        # AssertShape
        (iaa.Alpha((0.0, 0.1), iaa.Noop(), name="AlphaNoop"), default_dtypes),
        (iaa.AlphaElementwise((0.0, 0.1), iaa.Noop(), name="AlphaElementwiseNoop"), default_dtypes),
        (iaa.SimplexNoiseAlpha(iaa.Noop(), name="SimplexNoiseAlphaNoop"), default_dtypes),
        (iaa.FrequencyNoiseAlpha(exponent=(-2, 2), first=iaa.Noop(), name="FrequencyNoiseAlphaNoop"), default_dtypes),
        (iaa.Alpha((0.0, 0.1), iaa.Add(10), name="Alpha"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.AlphaElementwise((0.0, 0.1), iaa.Add(10), name="AlphaElementwise"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.SimplexNoiseAlpha(iaa.Add(10), name="SimplexNoiseAlpha"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.FrequencyNoiseAlpha(exponent=(-2, 2), first=iaa.Add(10), name="FrequencyNoiseAlpha"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.Superpixels(p_replace=0.01, n_segments=64), _not_dts([np.float16, np.float32])),
        (iaa.Scale({"height": 4, "width": 4}, name="Scale"),
         _not_dts([np.uint16, np.uint32, np.int16, np.int32, np.float32, np.float16, np.float64])),
        (iaa.CropAndPad(px=(-10, 10), name="CropAndPad"),
         _not_dts([np.uint16, np.uint32, np.int16, np.int32, np.float32, np.float16, np.float64])),
        (iaa.Pad(px=(0, 10), name="Pad"),
         _not_dts([np.uint16, np.uint32, np.int16, np.int32, np.float32, np.float16, np.float64])),
        (iaa.Crop(px=(0, 10), name="Crop"),
         _not_dts([np.uint16, np.uint32, np.int16, np.int32, np.float32, np.float16, np.float64]))
    ]

    for (aug, allowed_dtypes) in augs:
        # print("aug", aug.name)
        # print("allowed_dtypes", allowed_dtypes)
        for images_i in images:
            if images_i.dtype in allowed_dtypes:
                # print("image dt", images_i.dtype)
                images_aug = aug.augment_images(images_i)
                assert images_aug.dtype == images_i.dtype
            else:
                # print("image dt", images_i.dtype, "[SKIPPED]")
                pass
Example #29
def example_heavy_augmentations():
    print("Example: Heavy Augmentations")
    import imgaug as ia
    from imgaug import augmenters as iaa

    # random example images
    images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8)

    # Sometimes(0.5, ...) applies the given augmenter in 50% of all cases,
    # e.g. Sometimes(0.5, GaussianBlur(0.3)) would blur roughly every second image.
    st = lambda aug: iaa.Sometimes(0.5, aug)

    # Define our sequence of augmentation steps that will be applied to every image
    # All augmenters with per_channel=0.5 will sample one value _per image_
    # in 50% of all cases. In all other cases they will sample new values
    # _per channel_.
    seq = iaa.Sequential(
        [
            iaa.Fliplr(0.5),  # horizontally flip 50% of all images
            iaa.Flipud(0.5),  # vertically flip 50% of all images
            st(iaa.Crop(
                percent=(0,
                         0.1))),  # crop images by 0-10% of their height/width
            st(iaa.GaussianBlur(
                (0, 3.0))),  # blur images with a sigma between 0 and 3.0
            st(
                iaa.AdditiveGaussianNoise(
                    loc=0, scale=(0.0, 0.05 * 255),
                    per_channel=0.5)),  # add gaussian noise to images
            st(iaa.Dropout(
                (0.0, 0.1),
                per_channel=0.5)),  # randomly remove up to 10% of the pixels
            st(iaa.Add(
                (-10, 10), per_channel=0.5
            )),  # change brightness of images (by -10 to 10 of original value)
            st(iaa.Multiply((0.5, 1.5), per_channel=0.5)
               ),  # change brightness of images (50-150% of original value)
            st(iaa.ContrastNormalization(
                (0.5, 2.0),
                per_channel=0.5)),  # improve or worsen the contrast
            st(iaa.Grayscale((0.0, 1.0))),  # blend with grayscale image
            st(
                iaa.Affine(
                    scale={
                        "x": (0.8, 1.2),
                        "y": (0.8, 1.2)
                    },  # scale images to 80-120% of their size, individually per axis
                    translate_px={
                        "x": (-16, 16),
                        "y": (-16, 16)
                    },  # translate by -16 to +16 pixels (per axis)
                    rotate=(-45, 45),  # rotate by -45 to +45 degrees
                    shear=(-16, 16),  # shear by -16 to +16 degrees
                    order=[
                        0,
                        1
                    ],  # use scikit-image's interpolation orders 0 (nearest neighbour) and 1 (bilinear)
                    cval=(
                        0, 1.0
                    ),  # if mode is constant, use a cval between 0 and 1.0
                    mode=ia.
                    ALL  # use any of scikit-image's warping modes (see 2nd image from the top for examples)
                )),
            st(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)
               )  # apply elastic transformations with random strengths
        ],
        random_order=True  # do all of the above in random order
    )

    images_aug = seq.augment_images(images)

    # -----
    # Make sure that the example really does something
    assert not np.array_equal(images, images_aug)
Example #30
    def createSequence(self):

        seq_affine = []
        seq_non_affine = []
        seq_noise = []

        def sometimes_affine(aug): return iaa.Sometimes(
            self.default_param['affine_probability']/100, aug)

        def sometimes_non_affine(aug): return iaa.Sometimes(
            self.default_param['non_affine_probability']/100, aug)

        def sometimes_noise(aug): return iaa.Sometimes(
            self.default_param['noise_probability']/100, aug)

        affine = {k: self.default_param[k] for k in self.default_param.keys() if (
            ('affine' in k) and ('non' not in k) and self.default_param[k])}
        if affine != {}:
            affine_seq = {}
            for a in affine:
                if a == 'affine_flip_horizontal':
                    seq_affine.append(sometimes_affine(
                        iaa.Fliplr(affine[a]/100)))  # Fliplr flips horizontally
                elif a == 'affine_flip_vertical':
                    seq_affine.append(sometimes_affine(
                        iaa.Flipud(affine[a]/100)))  # Flipud flips vertically
                elif a == 'affine_rotate':
                    affine_seq['rotate'] = (-affine[a], affine[a])
                elif a == 'affine_scale':
                    affine_seq['scale'] = {
                        "x": (1, 1+affine[a]/100), "y": (1, 1+affine[a]/100)}
                elif a == 'affine_shear':
                    affine_seq['shear'] = (-affine[a], affine[a])
            if affine_seq != {}:
                affine_seq['mode'] = 'reflect'
                seq_affine.append(sometimes_affine(iaa.Affine(**affine_seq)))

        non_affine = {k: self.default_param[k] for k in self.default_param.keys() if (
            ('non_affine' in k) and self.default_param[k])}
        if non_affine != {}:
            for a in non_affine:
                if a == 'non_affine_brightness':
                    seq_non_affine.append(sometimes_non_affine(iaa.Multiply(
                        (1-non_affine[a]/100, 1+non_affine[a]/100), per_channel=True)))

                elif a == 'non_affine_contrast':
                    seq_non_affine.append(sometimes_non_affine(
                        iaa.ContrastNormalization((1-non_affine[a]/100, 1+non_affine[a]/100))))
                elif a == 'non_affine_emboss':
                    seq_non_affine.append(sometimes_non_affine(iaa.Emboss(
                        alpha=(non_affine[a]/200, non_affine[a]/100), strength=(0, non_affine[a]/50))))
                elif a == 'non_affine_grayscale':
                    seq_non_affine.append(sometimes_non_affine(
                        iaa.Grayscale(alpha=(non_affine[a]/200, non_affine[a]/100))))
                elif a == 'non_affine_saturation':
                    seq_non_affine.append(sometimes_non_affine(
                        iaa.AddToHueAndSaturation((-non_affine[a], non_affine[a]))))
                elif a == 'non_affine_shrpen':
                    seq_non_affine.append(sometimes_non_affine(iaa.Sharpen(alpha=(
                        non_affine[a]/200, non_affine[a]/100), lightness=(1-non_affine[a]/100, 1+non_affine[a]/100))))

        noise = {k: self.default_param[k] for k in self.default_param.keys() if (
            ('noise' in k) and self.default_param[k])}
        if noise != {}:
            for a in noise:
                if a == 'noise_blur':
                    seq_noise.append(sometimes_noise(
                        iaa.GaussianBlur((noise[a]/2, noise[a]))))
                elif a == 'noise_dropout':
                    seq_noise.append(sometimes_noise(
                        iaa.Dropout((noise[a]/200, noise[a]/100))))
                elif a == 'noise_frequency':
                    seq_noise.append(sometimes_noise(
                        iaa.FrequencyNoiseAlpha(exponent=(-noise[a], noise[a]))))
                elif a == 'noise_noise':
                    seq_noise.append(sometimes_noise(
                        iaa.AdditiveGaussianNoise(scale=(255*noise[a]/200, 255*noise[a]/100))))
                elif a == 'noise_salt_and_pepper':
                    seq_noise.append(sometimes_noise(
                        iaa.SaltAndPepper(p=(noise[a]/200, noise[a]/100))))

        return {'affine': seq_affine, 'non_affine': seq_non_affine, 'noise': seq_noise}
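
        # A hypothetical follow-up (not part of the original class): the three
        # lists returned above could be chained into a single pipeline, e.g.
        #
        #     seqs = self.createSequence()
        #     pipeline = iaa.Sequential(
        #         seqs['affine'] + seqs['non_affine'] + seqs['noise'],
        #         random_order=True)
        #     image_aug = pipeline.augment_image(image)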