Example #1
#image_seg_aug_dir = r"dataset\aug_val_anno"

if not os.path.exists(image_aug_dir):
    os.makedirs(image_aug_dir)
if not os.path.exists(image_seg_aug_dir):
    os.makedirs(image_seg_aug_dir)

# A collection of transforms; each transform generates one new image sample together with its segmentation mask
# 1. First, rescale by several factors
transform_seqs1 = [
    iaa.Affine(scale=0.6),
    iaa.Affine(scale=0.8),
    iaa.Affine(scale=1.2),
    iaa.Affine(scale=1.4),
    iaa.Fliplr(),  # horizontal (left-right) mirror flip
    iaa.Flipud(),  # vertical (up-down) flip
    iaa.Affine(rotate=20),  # rotation
    iaa.Affine(rotate=40),
    iaa.Affine(rotate=60),
    iaa.Affine(rotate=90),
    iaa.Affine(rotate=-20),
    iaa.Affine(rotate=-40),
    iaa.Affine(rotate=-60),
    # Convert the image from RGB to HSV, add 10 to the H channel, then convert back to RGB.
    iaa.WithColorspace(to_colorspace="HSV",
                       from_colorspace="RGB",
                       children=iaa.WithChannels(0, iaa.Add(10))),
    iaa.WithColorspace(to_colorspace="HSV",
                       from_colorspace="RGB",
                       children=iaa.WithChannels(0, iaa.Add(20))),
    iaa.WithColorspace(to_colorspace="HSV",
Example #2
 def __init__(self):
     super(ImgAugTransform, self).__init__()
     self.seq = iaa.Sequential(children=[
         iaa.Sequential(children=[
             iaa.Fliplr(p=0.5, name="Fliplr"),
             iaa.Flipud(p=0.5, name="Flipud"),
             iaa.Sequential(children=[
                 iaa.Affine(scale={
                     "x": (0.9, 1.1),
                     "y": (0.9, 1.1)
                 },
                            translate_percent={
                                "x": (-0.05, 0.05),
                                "y": (-0.05, 0.05)
                            },
                            rotate=(-45, 45),
                            shear=(-16, 16),
                            order=iap.Choice([0, 1, 3],
                                             p=[0.15, 0.80, 0.05]),
                            mode="reflect",
                            name="Affine"),
                 iaa.Sometimes(p=0.01,
                               then_list=iaa.PiecewiseAffine(
                                   scale=(0.0, 0.01),
                                   nb_rows=(4, 20),
                                   nb_cols=(4, 20),
                                   order=iap.Choice([0, 1, 3],
                                                    p=[0.15, 0.80, 0.05]),
                                   mode="reflect",
                                   name="PiecewiseAffine"))
             ],
                            random_order=True,
                            name="GeomTransform"),
             iaa.Sequential(children=[
                 iaa.Sometimes(p=0.75,
                               then_list=iaa.Add(value=(-10, 10),
                                                 per_channel=0.5,
                                                 name="Brightness")),
                 iaa.Sometimes(p=0.05,
                               then_list=iaa.Emboss(alpha=(0.0, 0.5),
                                                    strength=(0.5, 1.2),
                                                    name="Emboss")),
                 iaa.Sometimes(p=0.1,
                               then_list=iaa.Sharpen(alpha=(0.0, 0.5),
                                                     lightness=(0.5, 1.2),
                                                     name="Sharpen")),
                 iaa.Sometimes(p=0.25,
                               then_list=iaa.ContrastNormalization(
                                   alpha=(0.5, 1.5),
                                   per_channel=0.5,
                                   name="ContrastNormalization"))
             ],
                            random_order=True,
                            name="ColorTransform"),
             iaa.Sequential(children=[
                 iaa.Sometimes(p=0.5,
                               then_list=iaa.AdditiveGaussianNoise(
                                   loc=0,
                                   scale=(0.0, 10.0),
                                   per_channel=0.5,
                                   name="AdditiveGaussianNoise")),
                 iaa.Sometimes(p=0.1,
                               then_list=iaa.SaltAndPepper(
                                   p=(0, 0.001),
                                   per_channel=0.5,
                                   name="SaltAndPepper"))
             ],
                            random_order=True,
                            name="Noise"),
             iaa.OneOf(children=[
                 iaa.Sometimes(p=0.05,
                               then_list=iaa.MedianBlur(k=3,
                                                        name="MedianBlur")),
                 iaa.Sometimes(p=0.05,
                               then_list=iaa.AverageBlur(
                                   k=(2, 4), name="AverageBlur")),
                 iaa.Sometimes(p=0.5,
                               then_list=iaa.GaussianBlur(
                                   sigma=(0.0, 2.0), name="GaussianBlur"))
             ],
                       name="Blur"),
         ],
                        random_order=True,
                        name="MainProcess")
     ])
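 # A hypothetical __call__ for this transform class (not part of the snippet
 # above): it assumes the transform receives an HxWxC image and simply applies
 # self.seq to it.
 def __call__(self, img):
     import numpy as np
     img = np.asarray(img)  # accept PIL images as well as numpy arrays
     return self.seq.augment_image(img)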
Example #3
def main():
    indice = 0

    seq = iaa.Sequential([
        iaa.Affine(rotate=(-45, 45), shear=(-8, 8)),
        iaa.GaussianBlur(sigma=(0.0, 3.0)),
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        iaa.Multiply((0.5, 1.5)),
    ])

    for filename in XML_files:
        # normalize Windows backslashes in the path to forward slashes
        filename = filename.replace('\\', '/')

        print("Reading file", filename)
        path, xml_file_name = os.path.split(filename)  # split directory and XML file name

        output = '{0}{1}.{2}'

        doc = minidom.parse(filename)
        img_paths = doc.getElementsByTagName('path')

        image_filename = img_paths[0].firstChild.data
        corrige_RGB(image_filename)

        print('------------>', image_filename)
        path, img_file_name = os.path.split(image_filename)
        img_file_name, ext = img_file_name.split('.')

        img = imageio.imread(image_filename)

        bndboxes = doc.getElementsByTagName('object')

        bounding_boxes = []
        boxes = []
        for bndbox in bndboxes:
            xmin = int(bndbox.getElementsByTagName('xmin')[0].firstChild.data)
            ymin = int(bndbox.getElementsByTagName('ymin')[0].firstChild.data)
            xmax = int(bndbox.getElementsByTagName('xmax')[0].firstChild.data)
            ymax = int(bndbox.getElementsByTagName('ymax')[0].firstChild.data)

            # read the per-object attributes from this <object> node rather than
            # from the whole document (which would always return the first object)
            xname = bndbox.getElementsByTagName('name')[0].firstChild.data
            xpose = bndbox.getElementsByTagName('pose')[0].firstChild.data
            xtruncated = bndbox.getElementsByTagName(
                'truncated')[0].firstChild.data
            xdifficult = bndbox.getElementsByTagName(
                'difficult')[0].firstChild.data
            label = xname
            boxes.append({
                "xmin": xmin,
                "xmax": xmax,
                "ymin": ymin,
                "ymax": ymax,
                "name": xname,
                "pose": xpose,
                "truncated": xtruncated,
                "difficult": xdifficult
            })

            bounding_boxes.append(
                BoundingBox(x1=xmin, x2=xmax, y1=ymin, y2=ymax, label=label))

        bbs = BoundingBoxesOnImage(bounding_boxes, shape=img.shape)

        # numpy images are (height, width, channels)
        height = img.shape[0]
        width = img.shape[1]
        depth = img.shape[2]

        # modifying_img = seq.augment_image(img)
        # #ia.imshow(modifying_img)
        # print("---> Gravando ", output.format(output_img, name,i,ext))
        # imageio.imwrite(output.format(output_img, name,i,ext), modifying_img)
        # print("finished convert L")
        # i=i+1

        i = indice
        print('------------>Writing file')
        print('               ', output.format(output_img, i, ext))
        imageio.imwrite(output.format(output_img, i, ext), img)

        annotation = ET.Element("annotation")
        ET.SubElement(annotation, "folder").text = folder
        ET.SubElement(annotation, "filename").text = '{}.{}'.format(i, ext)
        ET.SubElement(annotation,
                      "path").text = output.format(output_img, i, ext)
        ET.SubElement(annotation, "segmented").text = "0"

        source = ET.SubElement(annotation, "source")
        ET.SubElement(source, "database").text = "Unknown"

        size = ET.SubElement(annotation, "size")
        ET.SubElement(size, "width").text = str(width)
        ET.SubElement(size, "height").text = str(height)
        ET.SubElement(size, "depth").text = str(depth)

        for box in boxes:
            object = ET.SubElement(annotation, "object")
            ET.SubElement(object, "name").text = box["name"]
            ET.SubElement(object, "pose").text = box["pose"]
            ET.SubElement(object, "truncated").text = str(box["truncated"])
            ET.SubElement(object, "difficult").text = str(box["difficult"])
            bndbox = ET.SubElement(object, "bndbox")
            ET.SubElement(bndbox, "xmin").text = str(box["xmin"])
            ET.SubElement(bndbox, "xmax").text = str(box["xmax"])
            ET.SubElement(bndbox, "ymin").text = str(box["ymin"])
            ET.SubElement(bndbox, "ymax").text = str(box["ymax"])

        tree = ET.ElementTree(annotation)

        print('               ', output.format(output_xml, i, 'xml'))
        tree.write(output.format(output_xml, i, 'xml'))
        '''
        annotation = {'folder': folder, 
                     'filename': '{}.{}'.format(i,ext),
                     'path': output.format(output_img, i,ext), 
                     'source' : {'database' : 'Unknown'}, 
                     'size' : {'width': width, 'height': height, 'depth': depth}, 
                     'segmented': '0',  
                     'object': {
                                'name': xml_name,
                                'pose': xml_pose,
                                'truncated': xml_truncated,
                                'difficult' : xml_difficult,
                                'bndbox': boxes[0]
                         }
                    }
        xml = dicttoxml(annotation, custom_root='annotation', attr_type=False)  
        print('                             ',output.format(output_img, i,'xml'))
        with open(output.format(output_img, i,'xml'), 'w') as file:
            file.write(xml.decode("utf-8") )
         '''

        # Code OK without boxes (image-only augmentation)
        #images = [img, img, img, img, img, img, img, img, img]
        #image_auge = seq.augment_images(images)
        #ia.imshow(ia.draw_grid(image_auge, cols=3, rows=4))
        #for j in range(0, len(images)):
        #    i = i + 1
        #    imageio.imwrite(output.format(output_img, i,ext), image_auge[j])

        for j in range(0, 15):
            image_aug, bbs_aug = seq(image=img, bounding_boxes=bbs)
            image_after = draw_bbs(
                image_aug,
                bbs_aug.remove_out_of_image().clip_out_of_image(), 5)

            i = i + 1
            print('               ', output.format(output_img, i, ext))

            imageio.imwrite(output.format(output_img, i, ext), image_aug)

            annotation = ET.Element("annotation")
            ET.SubElement(annotation, "folder").text = folder
            ET.SubElement(annotation, "filename").text = '{}.{}'.format(i, ext)
            ET.SubElement(annotation,
                          "path").text = output.format(output_img, i, ext)
            ET.SubElement(annotation, "segmented").text = "0"

            source = ET.SubElement(annotation, "source")
            ET.SubElement(source, "database").text = "Unknown"

            size = ET.SubElement(annotation, "size")
            ET.SubElement(size, "width").text = str(image_aug.shape[0])
            ET.SubElement(size, "height").text = str(image_aug.shape[1])
            ET.SubElement(size, "depth").text = str(image_aug.shape[2])

            j = 0
            for box in bbs_aug.bounding_boxes:
                object = ET.SubElement(annotation, "object")
                ET.SubElement(object, "name").text = boxes[j]["name"]
                ET.SubElement(object, "pose").text = boxes[j]["pose"]
                ET.SubElement(object,
                              "truncated").text = str(boxes[j]["truncated"])
                ET.SubElement(object,
                              "difficult").text = str(boxes[j]["difficult"])
                bndbox = ET.SubElement(object, "bndbox")

                ET.SubElement(bndbox, "xmin").text = str(round(int(box.x1)))
                ET.SubElement(bndbox, "xmax").text = str(round(int(box.x2)))
                ET.SubElement(bndbox, "ymin").text = str(round(int(box.y1)))
                ET.SubElement(bndbox, "ymax").text = str(round(int(box.y2)))

                # ET.SubElement(bndbox, "xmin").text = str(box.x1)
                # ET.SubElement(bndbox, "xmax").text = str(box.x2)
                # ET.SubElement(bndbox, "ymin").text = str(box.y1)
                # ET.SubElement(bndbox, "ymax").text = str(box.y2)
                j = j + 1

            tree = ET.ElementTree(annotation)

            print('               ', output.format(output_xml, i, 'xml'))
            tree.write(output.format(output_xml, i, 'xml'))

        indice = i + 1

        print('OK')
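
# A minimal sketch of the draw_bbs() helper used in main() above; it is not
# defined in this snippet, so this version simply assumes it draws each
# (clipped) bounding box onto the image with the given border thickness.
def draw_bbs(image, bbs, border):
    image_drawn = image
    for bb in bbs.bounding_boxes:
        image_drawn = bb.draw_on_image(image_drawn, size=border)
    return image_drawn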
Example #4
def test_determinism():
    reseed()

    images = [
        ia.quokka(size=(128, 128)),
        ia.quokka(size=(64, 64)),
        ia.imresize_single_image(skimage.data.astronaut(), (128, 256))
    ]
    images.extend([ia.quokka(size=(16, 16))] * 20)

    keypoints = [
        ia.KeypointsOnImage([
            ia.Keypoint(x=20, y=10, vis=None, label=None), ia.Keypoint(x=5, y=5, vis=None, label=None),
            ia.Keypoint(x=10, y=43, vis=None, label=None)], shape=(50, 60, 3))
    ] * 20

    augs = [
        iaa.Sequential([iaa.Fliplr(0.5), iaa.Flipud(0.5)]),
        iaa.SomeOf(1, [iaa.Fliplr(0.5), iaa.Flipud(0.5)]),
        iaa.OneOf([iaa.Fliplr(0.5), iaa.Flipud(0.5)]),
        iaa.Sometimes(0.5, iaa.Fliplr(1.0)),
        iaa.WithColorspace("HSV", children=iaa.Add((-50, 50))),
        # iaa.WithChannels([0], iaa.Add((-50, 50))),
        # iaa.Noop(name="Noop-nochange"),
        # iaa.Lambda(
        #     func_images=lambda images, random_state, parents, hooks: images,
        #     func_keypoints=lambda keypoints_on_images, random_state, parents, hooks: keypoints_on_images,
        #     name="Lambda-nochange"
        # ),
        # iaa.AssertLambda(
        #     func_images=lambda images, random_state, parents, hooks: True,
        #     func_keypoints=lambda keypoints_on_images, random_state, parents, hooks: True,
        #     name="AssertLambda-nochange"
        # ),
        # iaa.AssertShape(
        #     (None, None, None, 3),
        #     check_keypoints=False,
        #     name="AssertShape-nochange"
        # ),
        iaa.Resize((0.5, 0.9)),
        iaa.CropAndPad(px=(-50, 50)),
        iaa.Pad(px=(1, 50)),
        iaa.Crop(px=(1, 50)),
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        iaa.Superpixels(p_replace=(0.25, 1.0), n_segments=(16, 128)),
        # iaa.ChangeColorspace(to_colorspace="GRAY"),
        iaa.Grayscale(alpha=(0.1, 1.0)),
        iaa.GaussianBlur((0.1, 3.0)),
        iaa.AverageBlur((3, 11)),
        iaa.MedianBlur((3, 11)),
        # iaa.Convolve(np.array([[0, 1, 0],
        #                       [1, -4, 1],
        #                       [0, 1, 0]])),
        iaa.Sharpen(alpha=(0.1, 1.0), lightness=(0.8, 1.2)),
        iaa.Emboss(alpha=(0.1, 1.0), strength=(0.8, 1.2)),
        iaa.EdgeDetect(alpha=(0.1, 1.0)),
        iaa.DirectedEdgeDetect(alpha=(0.1, 1.0), direction=(0.0, 1.0)),
        iaa.Add((-50, 50)),
        iaa.AddElementwise((-50, 50)),
        iaa.AdditiveGaussianNoise(scale=(0.1, 1.0)),
        iaa.Multiply((0.6, 1.4)),
        iaa.MultiplyElementwise((0.6, 1.4)),
        iaa.Dropout((0.3, 0.5)),
        iaa.CoarseDropout((0.3, 0.5), size_percent=(0.05, 0.2)),
        iaa.Invert(0.5),
        iaa.ContrastNormalization((0.6, 1.4)),
        iaa.Affine(scale=(0.7, 1.3), translate_percent=(-0.1, 0.1),
                   rotate=(-20, 20), shear=(-20, 20), order=ia.ALL,
                   mode=ia.ALL, cval=(0, 255)),
        iaa.PiecewiseAffine(scale=(0.1, 0.3)),
        iaa.ElasticTransformation(alpha=0.5)
    ]

    augs_affect_geometry = [
        iaa.Sequential([iaa.Fliplr(0.5), iaa.Flipud(0.5)]),
        iaa.SomeOf(1, [iaa.Fliplr(0.5), iaa.Flipud(0.5)]),
        iaa.OneOf([iaa.Fliplr(0.5), iaa.Flipud(0.5)]),
        iaa.Sometimes(0.5, iaa.Fliplr(1.0)),
        iaa.Resize((0.5, 0.9)),
        iaa.CropAndPad(px=(-50, 50)),
        iaa.Pad(px=(1, 50)),
        iaa.Crop(px=(1, 50)),
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        iaa.Affine(scale=(0.7, 1.3), translate_percent=(-0.1, 0.1),
                   rotate=(-20, 20), shear=(-20, 20), order=ia.ALL,
                   mode=ia.ALL, cval=(0, 255)),
        iaa.PiecewiseAffine(scale=(0.1, 0.3)),
        iaa.ElasticTransformation(alpha=(5, 100), sigma=(3, 5))
    ]

    for aug in augs:
        aug_det = aug.to_deterministic()
        images_aug1 = aug_det.augment_images(images)
        images_aug2 = aug_det.augment_images(images)

        aug_det = aug.to_deterministic()
        images_aug3 = aug_det.augment_images(images)
        images_aug4 = aug_det.augment_images(images)

        assert array_equal_lists(images_aug1, images_aug2), \
            "Images (1, 2) expected to be identical for %s" % (aug.name,)

        assert array_equal_lists(images_aug3, images_aug4), \
            "Images (3, 4) expected to be identical for %s" % (aug.name,)

        assert not array_equal_lists(images_aug1, images_aug3), \
            "Images (1, 3) expected to be different for %s" % (aug.name,)

    for aug in augs_affect_geometry:
        aug_det = aug.to_deterministic()
        kps_aug1 = aug_det.augment_keypoints(keypoints)
        kps_aug2 = aug_det.augment_keypoints(keypoints)

        aug_det = aug.to_deterministic()
        kps_aug3 = aug_det.augment_keypoints(keypoints)
        kps_aug4 = aug_det.augment_keypoints(keypoints)

        assert keypoints_equal(kps_aug1, kps_aug2), \
            "Keypoints (1, 2) expected to be identical for %s" % (aug.name,)

        assert keypoints_equal(kps_aug3, kps_aug4), \
            "Keypoints (3, 4) expected to be identical for %s" % (aug.name,)

        assert not keypoints_equal(kps_aug1, kps_aug3), \
            "Keypoints (1, 3) expected to be different for %s" % (aug.name,)
Example #5
def train(train_dir, val_dir):

    start_time = time.time()

    config = DESConfig()
    config.display()

    ## DATASET

    # Training dataset
    dataset_train = PhoSimDataset()
    dataset_train.load_sources(train_dir, dataset="training")
    dataset_train.prepare()

    # Validation dataset
    dataset_val = PhoSimDataset()
    dataset_val.load_sources(val_dir, dataset="validation")
    dataset_val.prepare()

    # Image augmentation
    augmentation = iaa.SomeOf((0, 4), [
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        iaa.OneOf([
            iaa.Affine(rotate=90),
            iaa.Affine(rotate=180),
            iaa.Affine(rotate=270)
        ]),
        iaa.GaussianBlur(sigma=(0.0, np.random.random_sample() * 4 + 2)),  # note: the upper sigma bound is drawn once, when this pipeline is built
        iaa.AddElementwise((-25, 25))
    ])

    # Create model in training mode
    model = modellib.MaskRCNN(mode="training",
                              config=config,
                              model_dir=MODEL_DIR)

    # Which weights to start with?
    init_with = "coco"  # imagenet, coco, or last

    if init_with == "imagenet":
        model.load_weights(model.get_imagenet_weights(), by_name=True)
    elif init_with == "coco":
        # Load weights trained on MS COCO, but skip layers that
        # are different due to the different number of classes
        # See README for instructions to download the COCO weights
        model.load_weights(COCO_MODEL_PATH,
                           by_name=True,
                           exclude=[
                               "mrcnn_class_logits", "mrcnn_bbox_fc",
                               "mrcnn_bbox", "mrcnn_mask"
                           ])
    elif init_with == "last":
        # Load the last model you trained and continue training
        model.load_weights(model.find_last(), by_name=True)

    # Train the head branches
    # Passing layers="heads" freezes all layers except the head
    # layers. You can also pass a regular expression to select
    # which layers to train by name pattern.
    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE,
                augmentation=augmentation,
                epochs=15,
                layers='heads')

    # Fine tune all layers
    # Passing layers="all" trains all layers. You can also
    # pass a regular expression to select which layers to
    # train by name pattern.
    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE / 10,
                augmentation=augmentation,
                epochs=25,
                layers="all")

    # Do one more with an even lower learning rate
    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE / 100,
                augmentation=augmentation,
                epochs=35,
                layers="all")

    # Final stage
    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE / 1000,
                augmentation=augmentation,
                epochs=50,
                layers="all")

    # Save weights
    model_path = os.path.join(MODEL_DIR, "astro_rcnn_decam.h5")
    model.keras_model.save_weights(model_path)

    print("Done in %.2f hours." % float((time.time() - start_time) / 3600))

    return
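
# Hypothetical entry point for the train() function above; the directory paths
# are placeholders and would normally come from the command line.
if __name__ == "__main__":
    train("data/phosim/train", "data/phosim/val")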
Example #6
        trainDataset.prepare()

        # load the validation dataset
        valDataset = LesionBoundaryDataset(IMAGE_PATHS, CLASS_NAMES)
        valDataset.load_lesions(valIdxs)
        valDataset.prepare()

        # initialize the training configuration
        config = LesionBoundaryConfig()
        config.display()

        # initialize the image augmentation process
        aug = iaa.SomeOf(
            (0, 2),
            [iaa.Fliplr(0.5),
             iaa.Flipud(0.5),
             iaa.Affine(rotate=(-10, 10))])

        # initialize the model and load the COCO weights so we can
        # perform fine-tuning
        model = modellib.MaskRCNN(mode="training",
                                  config=config,
                                  model_dir=LOGS_AND_MODEL_DIR)
        model.load_weights(COCO_PATH,
                           by_name=True,
                           exclude=[
                               "mrcnn_class_logits", "mrcnn_bbox_fc",
                               "mrcnn_bbox", "mrcnn_mask"
                           ])

        # train *just* the layer heads
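        # (A sketch of the usual heads-only training step, mirroring the other
        # Mask R-CNN examples in this file; the epoch count is an assumption.)
        model.train(trainDataset, valDataset,
                    learning_rate=config.LEARNING_RATE,
                    epochs=20,
                    layers="heads",
                    augmentation=aug)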
Example #7
def test_keypoint_augmentation():
    reseed()

    keypoints = []
    for y in range(40//5):
        for x in range(60//5):
            keypoints.append(ia.Keypoint(x=x * 5, y=y * 5, vis=None, label=None))

    keypoints_oi = ia.KeypointsOnImage(keypoints, shape=(40, 60, 3))
    keypoints_oi_empty = ia.KeypointsOnImage([], shape=(40, 60, 3))

    augs = [
        iaa.Add((-5, 5), name="Add"),
        iaa.AddElementwise((-5, 5), name="AddElementwise"),
        iaa.AdditiveGaussianNoise(0.01*255, name="AdditiveGaussianNoise"),
        iaa.Multiply((0.95, 1.05), name="Multiply"),
        iaa.Dropout(0.01, name="Dropout"),
        iaa.CoarseDropout(0.01, size_px=6, name="CoarseDropout"),
        iaa.Invert(0.01, per_channel=True, name="Invert"),
        iaa.ContrastNormalization((0.95, 1.05), name="ContrastNormalization"),
        iaa.GaussianBlur(sigma=(0.95, 1.05), name="GaussianBlur"),
        iaa.AverageBlur((3, 5), name="AverageBlur"),
        iaa.MedianBlur((3, 5), name="MedianBlur"),
        # iaa.BilateralBlur((3, 5), name="BilateralBlur"),
        # WithColorspace ?
        # iaa.AddToHueAndSaturation((-5, 5), name="AddToHueAndSaturation"),
        # ChangeColorspace ?
        # Grayscale cannot be tested, input not RGB
        # Convolve ?
        iaa.Sharpen((0.0, 0.1), lightness=(1.0, 1.2), name="Sharpen"),
        iaa.Emboss(alpha=(0.0, 0.1), strength=(0.5, 1.5), name="Emboss"),
        iaa.EdgeDetect(alpha=(0.0, 0.1), name="EdgeDetect"),
        iaa.DirectedEdgeDetect(alpha=(0.0, 0.1), direction=0, name="DirectedEdgeDetect"),
        iaa.Fliplr(0.5, name="Fliplr"),
        iaa.Flipud(0.5, name="Flipud"),
        iaa.Affine(translate_px=(-5, 5), name="Affine-translate-px"),
        iaa.Affine(translate_percent=(-0.05, 0.05), name="Affine-translate-percent"),
        iaa.Affine(rotate=(-20, 20), name="Affine-rotate"),
        iaa.Affine(shear=(-20, 20), name="Affine-shear"),
        iaa.Affine(scale=(0.9, 1.1), name="Affine-scale"),
        iaa.PiecewiseAffine(scale=(0.001, 0.005), name="PiecewiseAffine"),
        # iaa.PerspectiveTransform(scale=(0.01, 0.10), name="PerspectiveTransform"),
        iaa.ElasticTransformation(alpha=(0.1, 0.2), sigma=(0.1, 0.2), name="ElasticTransformation"),
        # Sequential
        # SomeOf
        # OneOf
        # Sometimes
        # WithChannels
        # Noop
        # Lambda
        # AssertLambda
        # AssertShape
        iaa.Alpha((0.0, 0.1), iaa.Add(10), name="Alpha"),
        iaa.AlphaElementwise((0.0, 0.1), iaa.Add(10), name="AlphaElementwise"),
        iaa.SimplexNoiseAlpha(iaa.Add(10), name="SimplexNoiseAlpha"),
        iaa.FrequencyNoiseAlpha(exponent=(-2, 2), first=iaa.Add(10),
                                name="SimplexNoiseAlpha"),
        iaa.Superpixels(p_replace=0.01, n_segments=64),
        iaa.Resize(0.5, name="Resize"),
        iaa.CropAndPad(px=(-10, 10), name="CropAndPad"),
        iaa.Pad(px=(0, 10), name="Pad"),
        iaa.Crop(px=(0, 10), name="Crop")
    ]

    for aug in augs:
        dss = []
        for i in range(10):
            aug_det = aug.to_deterministic()

            kp_fully_empty_aug = aug_det.augment_keypoints([])
            assert kp_fully_empty_aug == []

            kp_first_empty_aug = aug_det.augment_keypoints([keypoints_oi_empty])[0]
            assert len(kp_first_empty_aug.keypoints) == 0

            kp_image = keypoints_oi.to_keypoint_image(size=5)
            kp_image_aug = aug_det.augment_image(kp_image)
            kp_image_aug_rev = ia.KeypointsOnImage.from_keypoint_image(
                kp_image_aug,
                if_not_found_coords={"x": -9999, "y": -9999},
                nb_channels=1
            )
            kp_aug = aug_det.augment_keypoints([keypoints_oi])[0]
            ds = []
            assert len(kp_image_aug_rev.keypoints) == len(kp_aug.keypoints),\
                "Lost keypoints for '%s' (%d vs expected %d)" \
                % (aug.name, len(kp_aug.keypoints), len(kp_image_aug_rev.keypoints))
            for kp_pred, kp_pred_img in zip(kp_aug.keypoints, kp_image_aug_rev.keypoints):
                kp_pred_lost = (kp_pred.x == -9999 and kp_pred.y == -9999)
                kp_pred_img_lost = (kp_pred_img.x == -9999 and kp_pred_img.y == -9999)

                if not kp_pred_lost and not kp_pred_img_lost:
                    d = np.sqrt((kp_pred.x - kp_pred_img.x) ** 2
                                + (kp_pred.y - kp_pred_img.y) ** 2)
                    ds.append(d)
            dss.extend(ds)
            if len(ds) == 0:
                print("[INFO] No valid keypoints found for '%s' "
                      "in test_keypoint_augmentation()" % (str(aug),))
        assert np.average(dss) < 5.0, \
            "Average distance too high (%.2f, with ds: %s)" \
            % (np.average(dss), str(dss))
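
# A minimal sketch of the reseed() helper called at the top of these tests;
# it is assumed to only reset imgaug's and numpy's global random state so the
# tests are reproducible (the default seed is an arbitrary placeholder).
import numpy as np
import imgaug as ia

def reseed(seed=1000):
    ia.seed(seed)
    np.random.seed(seed)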
Example #8
import argparse
import os
import threading
import cv2

import imgaug as ia
from imgaug import augmenters as iaa

from queue import Queue, Empty

from preprocessing import utils

ia.seed(1)

processes = {
    'flipVertical': {'both': True, 'seq': iaa.Flipud(1)},

    'flipHorizontal': {'both': True, 'seq': iaa.Fliplr(1)},

    'blur': {'both': False, 'seq': iaa.GaussianBlur(sigma=(0.0, 3.0))},

    'sharpen': {'both': False, 'seq': iaa.Sharpen(alpha=(0.0, 1.0), lightness=(0.75, 2.0))}
}

folders = ['train']


def run(arguments):
    input_folder = arguments.input_path
    output_path = arguments.output_path
    class_name = arguments.class_name
Example #9
def train(model):
    """Train the model."""
    # Training dataset.
    dataset_train = BalloonDataset()
    dataset_train.load_balloon(args.dataset, "train")
    dataset_train.prepare()

    # Validation dataset
    dataset_val = BalloonDataset()
    dataset_val.load_balloon(args.dataset, "val")
    dataset_val.prepare()

    # Image augmentation
    # http://imgaug.readthedocs.io/en/latest/source/augmenters.html

    augmentation = iaa.SomeOf(
        (1, 5),
        [
            iaa.Fliplr(0.5),
            iaa.Flipud(0.5),
            iaa.Affine(rotate=90),  # new to force more horizontal lines
            iaa.Affine(
                rotate=(-90, 90), mode="edge"
            ),  # mode= "edge" ads straight lines in created empty space
            #for this do one of
            iaa.OneOf([
                iaa.CropAndPad(percent=(-0.3, 0.05),
                               sample_independently=False,
                               pad_mode="edge"),
                iaa.Crop(percent=(0, 0.05))
            ]),
            iaa.GaussianBlur(sigma=(0.0, 1.0)),
            iaa.Multiply((0.5, 1.2)),  # changing brightness
            iaa.Grayscale(alpha=(0.0, 0.9))
        ])

    # *** This training schedule is an example. Update to your needs ***
    # Since we're using a very small dataset, and starting from
    # COCO trained weights, we don't need to train too long. Also,
    # no need to train all layers, just the heads should do it.
    print("Training network heads")
    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=15,
                augmentation=augmentation,
                layers='heads')  # 'heads' or 'all'

    print("Training 4+")
    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=60,
                augmentation=augmentation,
                layers='4+')  # 'heads' or 'all'

    print("Train all")
    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=200,
                augmentation=augmentation,
                layers='all')  # 'heads' or 'all'

    print("Train all lower learning rate")
    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE / 10,
                epochs=600,
                augmentation=augmentation,
                layers='all')  # 'heads' or 'all'
Example #10
def img_aug_seg_multiclass(list_images_files, list_masks_files, image_shape, train_or_valid='train', img_aug_mode_rotate_flip=1,
                           img_aug_mode_contrast=False):
    # list_images is a list of 3D numpy arrays (height, width, channel)
    # (batch, size, size, 3): a 1-D list with batch elements, each of shape (size, size, 3)
    list_images = my_image_helper.load_resize_images(list_images_files, image_shape)  # list of training files

    if train_or_valid == 'test':  # no transform, return directly; the test set has no annotations
        return list_images

    if train_or_valid in ['train', 'valid']:
        # (batch, size, size, 1): a 1-D list with batch elements, each of shape (size, size, 1); the mask annotation files
        list_masks = my_image_helper.load_resize_images(list_masks_files, image_shape, grayscale=True)

    if train_or_valid == 'valid':  # no transform, return directly; the validation set has annotations
        return list_images, list_masks

    # After augmentation the 0/255 annotation images can contain stray values such as 1, 2, 3, 4
    if train_or_valid == 'train':
        # different img_aug_mode_rotate_flip values define different seq1 pipelines:
        # 1: flip + rotate, 2: flip without rotation, 3: translation only (no flip, no rotation)
        if img_aug_mode_rotate_flip == 1:
            sometimes = lambda aug: iaa.Sometimes(0.96, aug)
            seq1 = iaa.Sequential([
                # iaa.Crop(px=(0, 16)),  # crop images from each side by 0 to 16px (randomly chosen)
                iaa.Fliplr(0.5),  # horizontally flip 50% of the images
                iaa.Flipud(0.2),  # vertically flip 20% of the images

                # iaa.Crop(px=(0, 10)),

                # sometimes(iaa.ContrastNormalization((0.92, 1.08), per_channel=0.5), ),
                sometimes(iaa.Affine(
                    # scale={"x": (0.92, 1.08), "y": (0.92, 1.08)},
                    translate_percent={"x": (-0.02, 0.02), "y": (-0.02, 0.02)},
                    # translate by -2 to +2 percent (per axis)
                    rotate=(0, 360),  # rotate by 0 to 360 degrees
                )),
            ])

        if img_aug_mode_rotate_flip == 2:
            sometimes = lambda aug: iaa.Sometimes(0.96, aug)
            seq1 = iaa.Sequential([
                # iaa.Crop(px=(0, 16)),  # crop images from each side by 0 to 16px (randomly chosen)
                iaa.Fliplr(0.5),  # horizontally flip 50% of the images
                iaa.Flipud(0.2),  # vertically flip 20% of the images

                # iaa.Crop(px=(0, 6)),

                # sometimes(iaa.ContrastNormalization((0.92, 1.08), per_channel=0.5), ),
                sometimes(iaa.Affine(
                    # scale={"x": (0.92, 1.08), "y": (0.92, 1.08)},
                    translate_percent={"x": (-0.06, 0.06), "y": (-0.05, 0.05)},
                    # translate by -6 to +6 percent in x and -5 to +5 percent in y
                    # rotate=(0, 360),  # rotate by 0 to 360 degrees
                )),
            ])

        if img_aug_mode_rotate_flip == 3:
            sometimes = lambda aug: iaa.Sometimes(0.96, aug)
            seq1 = iaa.Sequential([
                # iaa.Crop(px=(0, 16)),  # crop images from each side by 0 to 16px (randomly chosen)
                # iaa.Fliplr(0.5),  # horizontally flip 50% of the images
                # iaa.Flipud(0.2),  # vertically  flip 50% of the images

                # iaa.Crop(px=(0, 6)),

                # sometimes(iaa.ContrastNormalization((0.92, 1.08), per_channel=0.5), ),
                sometimes(iaa.Affine(
                    # scale={"x": (0.92, 1.08), "y": (0.92, 1.08)},
                    translate_percent={"x": (-0.06, 0.06), "y": (-0.05, 0.05)},
                    # translate by -6 to +6 percent in x and -5 to +5 percent in y
                    # rotate=(0, 360),  # rotate by 0 to 360 degrees
                )),
            ])

        # The deterministic sequence always applies exactly the same effects to the images:
        # the transforms are identical between the image pass and the mask pass, but differ between items within each batch.
        seq_det = seq1.to_deterministic()

        images_aug = seq_det.augment_images(list_images)

        images_aug_annotations = []
        for file_mask in list_masks:
            list_mask = [file_mask]
            tmp_1 = seq_det.augment_images(list_mask)
            # cv2.imwrite('/tmp/000.jpg', tmp_1[0]) #test
            images_aug_annotations.append(tmp_1[0])

        # The images can get one extra (contrast) transform; the annotations do not need it because they are binary masks
        if img_aug_mode_contrast:
            sometimes1 = lambda aug: iaa.Sometimes(0.96, aug)
            seq2 = iaa.Sequential([
                sometimes1(iaa.ContrastNormalization((0.92, 1.08), per_channel=0.5), ),
                # change brightness of images (by -6 to +6 of the original value)
                # sometimes1(iaa.Add((-6, 6), per_channel=0.5),),  # do not enable this line, it breaks the augmentation
            ])

            images_aug = seq2.augment_images(images_aug)


        return images_aug, images_aug_annotations
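
# The comment at the top of img_aug_seg_multiclass() notes that the 0/255
# masks can pick up stray values such as 1-4 after augmentation. A minimal
# clean-up sketch, assuming a simple 127 threshold is acceptable:
import numpy as np

def binarize_mask(mask, threshold=127):
    # map interpolated grey values back to a clean 0/255 mask
    return np.where(mask > threshold, 255, 0).astype(np.uint8)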
Example #11
def data_aug(img, bboxs=None, keypoints=None):
    '''
    :param img: the image to augment
    :param bboxs: list, [ [x1, y1, x2, y2], ..., [xn1, yn1, xn2, yn2] ]
    :param keypoints: keypoints in COCO or AI-Challenger format, list of lists, [ [num_joints x 3], [num_joints x 3], ... ]
    :return:
    '''
    is_flip = [random.randint(0, 1), random.randint(0, 1)]
    seq = iaa.Sequential([
        iaa.Multiply((0.7, 1.5)),
        iaa.Grayscale(iap.Choice(a=[0, 1], p=[0.8, 0.2]),
                      from_colorspace='BGR'),
        iaa.Fliplr(is_flip[0]),
        iaa.Flipud(is_flip[1]),
        iaa.Affine(rotate=(-15, 15), scale=(0.8, 1.2), mode='constant'),
    ])

    seq_det = seq.to_deterministic()
    bbs = None
    kps = None

    if bboxs is not None:
        assert type(bboxs) == type([])
        bbs = ia.BoundingBoxesOnImage([], shape=img.shape)
        for box in bboxs:
            bbs.bounding_boxes.append(
                ia.BoundingBox(x1=box[0], y1=box[1], x2=box[2], y2=box[3]))

    if keypoints is not None:
        kps = ia.KeypointsOnImage([], shape=img.shape)
        assert type(keypoints) == type([])
        for single_person_keypoints in keypoints:
            for i in range(14):
                joint = single_person_keypoints[i * 3:i * 3 + 3]
                kps.keypoints.append(ia.Keypoint(x=joint[0], y=joint[1]))

    img_aug = seq_det.augment_image(img)
    if bbs is not None:
        bbs_aug = seq_det.augment_bounding_boxes(bbs)
        bboxs = []
        for i in range(len(bbs_aug.bounding_boxes)):
            box_aug = bbs_aug.bounding_boxes[i]
            box = [box_aug.x1, box_aug.y1, box_aug.x2, box_aug.y2]
            bboxs.append(box)

    if kps is not None:
        kps_aug = seq_det.augment_keypoints(kps)
        kps_ori = copy.copy(keypoints)
        kps_ori = np.reshape(np.asarray(kps_ori), newshape=(-1, 14, 3))
        joint_nums = 14
        keypoints = []
        for i in range(len(kps_aug.keypoints)):
            point = kps_aug.keypoints[i]
            keypoints.append([point.x, point.y, 1])
            # single_keypoints.append([point.x, point.y, 1])
            # if len(single_keypoints) == joint_nums:
            #     keypoints.append(single_keypoints)
            #     single_keypoints = []
        keypoints = np.reshape(np.asarray(keypoints), newshape=(-1, 14, 3))
        # keep ori keypoint visiable attribute
        for i in range(kps_ori.shape[0]):
            for joint in range(kps_ori.shape[1]):
                keypoints[i][joint][2] = kps_ori[i][joint][2]

        # if flip, change keypoint order (left <-> right)
        # ai-format: [ 0-right_shoulder, 1-right_elbow, 2-right_wrist,
        #              3-left_shoulder, 4-left_elbow, 5-left_wrist,
        #              6-right_hip, 7-right_knee, 8-right_ankle,
        #              9-left_hip, 10-left_knee, 11-left_ankle,
        #              12-head, 13-neck ]
        # coco-format: TODO add coco-format change index
        change_index = [[0, 3], [1, 4], [2, 5], [6, 9], [7, 10], [8, 11]]
        for flip in is_flip:
            if flip:
                for i in range(kps_ori.shape[0]):
                    for index in change_index:
                        right_point = copy.copy(keypoints[i][index[0]])
                        keypoints[i][index[0]] = keypoints[i][index[1]]
                        keypoints[i][index[1]] = right_point
        keypoints = [
            list(np.reshape(single_person_keypoints, (-1, )))
            for single_person_keypoints in keypoints
        ]

    # test
    # if bbs is not None:
    #     img_before = bbs.draw_on_image(img, color=(0, 255, 0), thickness=2)
    #     img_after = bbs_aug.draw_on_image(img_aug, color=(0,0,255), thickness=2)
    #     cv2.imshow('box ori', img_before)
    #     cv2.imshow('box after', img_after)
    #     cv2.waitKey(0)
    # if kps is not None:
    #     img_before = kps.draw_on_image(img, color=(0, 255, 0), size=5)
    #     img_after = kps_aug.draw_on_image(img_aug, color=(0, 0, 255), size=5)
    #     for i in range(kps_ori.shape[0]):
    #         for joint in range(kps_ori.shape[1]):
    #             point = kps_ori[i][joint]
    #             cv2.putText(img_before, str(point[2]), (int(point[0]), int(point[1])), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 250), 1)
    #             point = keypoints[i][joint]
    #             cv2.putText(img_after, str(point[2]), (int(point[0]), int(point[1])), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 250), 1)
    #     cv2.imshow('kps ori', img_before)
    #     cv2.imshow('kps after', img_after)
    #     cv2.waitKey(0)

    return img_aug, bboxs, keypoints
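
# A hypothetical smoke test for data_aug() above; the image, the box, and the
# 14-joint keypoint list are synthetic placeholders in AI-Challenger format.
if __name__ == '__main__':
    import numpy as np
    dummy_img = np.zeros((256, 256, 3), dtype=np.uint8)
    dummy_boxes = [[10, 20, 100, 200]]
    dummy_kpts = [[30, 40, 1] * 14]  # one person, 14 joints, all marked visible
    img_aug, boxes_aug, kpts_aug = data_aug(dummy_img, dummy_boxes, dummy_kpts)
    print(img_aug.shape, boxes_aug, kpts_aug)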
Example #12
def imgs_aug(list_image_files, image_shape=(299, 299, 3), train_or_valid='train',
             img_aug_mode_rotate_flip=1,
             img_aug_mode_contrast=False):
    # list_images is a list of 3D numpy arrays (height, width, channel)
    # (batch, size, size, 3): a 1-D list with batch elements, each of shape (size, size, 3)
    list_images = my_image_helper.load_resize_images(list_image_files, image_shape)

    if train_or_valid == 'valid':
        return list_images

    if train_or_valid == 'test':
        return list_images

    if train_or_valid == 'train':
        if img_aug_mode_rotate_flip == 1:
            sometimes = lambda aug: iaa.Sometimes(0.96, aug)
            # https://github.com/aleju/imgaug/blob/master/images/examples_crop.jpg
            # https://github.com/aleju/imgaug/blob/master/images/examples_affine_translate.jpg
            seq = iaa.Sequential([
                # iaa.Crop(px=(0, 16)),  # crop images from each side by 0 to 16px (randomly chosen)
                # sometimes(iaa.CropAndPad(
                #     percent=(-0.04, 0.04),
                #     pad_mode=ia.ALL,
                #     pad_cval=(0, 255)
                # )),
                iaa.Fliplr(0.5),  # horizontally flip 50% of the images
                iaa.Flipud(0.2),  # vertically flip 20% of the images
                # iaa.GaussianBlur(sigma=(0, 3.0)),  # blur images with a sigma of 0 to 3.0,
                # iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),  # sharpen images
                # sometimes(iaa.Crop(percent=(0, 0.1))),  # crop images by 0-10% of their height/width
                # shortcut for CropAndPad

                # improve or worsen the contrast; with per_channel, the change is applied channel-wise with possibly different strengths
                # sometimes1(iaa.ContrastNormalization((0.9, 1.1), per_channel=0.5), ),
                # change brightness of images (by -6 to +6 of the original value)
                # sometimes1(iaa.Add((-6, 6), per_channel=0.5),),
                sometimes(iaa.Affine(
                    # scale={"x": (0.92, 1.08), "y": (0.92, 1.08)},
                    # scale images to 80-120% of their size, individually per axis
                    # Translation Shifts the pixels of the image by the specified amounts in the x and y directions
                    translate_percent={"x": (-0.02, 0.02), "y": (-0.02, 0.02)},
                    # translate by -2 to +2 percent (per axis)
                    rotate=(-10, 10),  # rotate by -10 to +10 degrees
                    # shear=(-16, 16),  # shear by -16 to +16 degrees
                    # order=[0, 1],  # use nearest neighbour or bilinear interpolation (fast)
                    # cval=(0, 255),  # if mode is constant, use a cval between 0 and 255
                    # mode=ia.ALL  # use any of scikit-image's warping modes (see 2nd image from the top for examples)
                )),
            ])

        # mode 2: flip but don't rotate
        elif img_aug_mode_rotate_flip == 2:
            sometimes = lambda aug: iaa.Sometimes(0.96, aug)
            # sometimes1 = lambda aug: iaa.Sometimes(0.96, aug)
            seq = iaa.Sequential([
                # iaa.Crop(px=(0, 16)),  # crop images from each side by 0 to 16px (randomly chosen)
                # sometimes(iaa.CropAndPad(
                #     percent=(-0.04, 0.04),
                #     pad_mode=ia.ALL,
                #     pad_cval=(0, 255)
                # )),
                iaa.Fliplr(0.5),  # horizontally flip 50% of the images
                iaa.Flipud(0.2),  # vertically flip 20% of the images
                # iaa.GaussianBlur(sigma=(0, 3.0)),  # blur images with a sigma of 0 to 3.0,
                # iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),  # sharpen images
                # sometimes(iaa.Crop(percent=(0, 0.1))),  # crop images by 0-10% of their height/width
                # shortcut for CropAndPad

                # improve or worsen the contrast; with per_channel, the change is applied channel-wise with possibly different strengths
                # sometimes1(iaa.ContrastNormalization((0.92, 1.08), per_channel=0.5), ),
                # change brightness of images (by -5 to 5 of original value)
                # sometimes(iaa.Add((-5, 5), per_channel=0.5),),
                sometimes(iaa.Affine(
                    # scale={"x": (0.92, 1.08), "y": (0.92, 1.08)},
                    # scale images to 80-120% of their size, individually per axis
                    # Translation Shifts the pixels of the image by the specified amounts in the x and y directions
                    translate_percent={"x": (-0.05, 0.05), "y": (-0.05, 0.05)},
                    # translate by -5 to +5 percent (per axis)
                    # rotate=(0, 360),  # rotate by -45 to +45 degrees
                    # shear=(-16, 16),  # shear by -16 to +16 degrees
                    # order=[0, 1],  # use nearest neighbour or bilinear interpolation (fast)
                    # cval=(0, 255),  # if mode is constant, use a cval between 0 and 255
                    # mode=ia.ALL  # use any of scikit-image's warping modes (see 2nd image from the top for examples)
                )),
            ])

        # mode 3: don't rotate, don't flip
        elif img_aug_mode_rotate_flip == 3:
            sometimes = lambda aug: iaa.Sometimes(0.96, aug)
            # sometimes1 = lambda aug: iaa.Sometimes(0.96, aug)
            seq = iaa.Sequential([
                # iaa.Crop(px=(0, 16)),  # crop images from each side by 0 to 16px (randomly chosen)
                # sometimes(iaa.CropAndPad(
                #     percent=(-0.04, 0.04),
                #     pad_mode=ia.ALL,
                #     pad_cval=(0, 255)
                # )),
                # iaa.Fliplr(0.5),  # horizontally flip 50% of the images
                # iaa.Flipud(0.2),  # vertically  flip 50% of the images
                # iaa.GaussianBlur(sigma=(0, 3.0)),  # blur images with a sigma of 0 to 3.0,
                # iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),  # sharpen images
                # sometimes(iaa.Crop(percent=(0, 0.1))),  # crop images by 0-10% of their height/width
                # shortcut for CropAndPad

                # improve or worsen the contrast; with per_channel, the change is applied channel-wise with possibly different strengths
                # sometimes1(iaa.ContrastNormalization((0.92, 1.08), per_channel=0.5), ),
                # change brightness of images (by -5 to 5 of original value)
                # sometimes(iaa.Add((-5, 5), per_channel=0.5),),
                sometimes(iaa.Affine(
                    # scale={"x": (0.92, 1.08), "y": (0.92, 1.08)},
                    # scale images to 80-120% of their size, individually per axis
                    # Translation Shifts the pixels of the image by the specified amounts in the x and y directions
                    translate_percent={"x": (-0.05, 0.05), "y": (-0.05, 0.05)},
                    # translate by -5 to +5 percent (per axis)
                    # rotate=(0, 360),  # rotate by -45 to +45 degrees
                    # shear=(-16, 16),  # shear by -16 to +16 degrees
                    # order=[0, 1],  # use nearest neighbour or bilinear interpolation (fast)
                    # cval=(0, 255),  # if mode is constant, use a cval between 0 and 255
                    # mode=ia.ALL  # use any of scikit-image's warping modes (see 2nd image from the top for examples)
                )),
            ])


        images_aug = seq.augment_images(list_images)

        # apply a contrast transform
        if img_aug_mode_contrast:
            sometimes1 = lambda aug: iaa.Sometimes(0.96, aug)
            seq2 = iaa.Sequential([
                sometimes1(iaa.ContrastNormalization((0.92, 1.08), per_channel=0.5), ),
                # change brightness of images (by -6 to +6 of the original value)
                # sometimes1(iaa.Add((-6, 6), per_channel=0.5),),  # do not enable this line, it breaks the augmentation
            ])

            images_aug = seq2.augment_images(images_aug)

        return images_aug
Example #13
def prepare_augmentation(args):
    """
    Created when declaring the data_loading pipeline
    :param args:
    :return:
    """
    import warnings
    warnings.warn("This function will be deprecated soon!")
    aug_dict = {}
    default = [
        "affine", "resize", "crop", "crop_to_fix", "pad", "flip", "brightness",
        "noise"
    ]
    # --------------------------------------Geometry---------------------------------------
    if args.do_affine:
        aug_dict.update({
            "affine": [
                augmenters.Affine(scale={
                    "x": args.scale_x,
                    "y": args.scale_y
                },
                                  rotate=args.rotation,
                                  translate_percent={
                                      "x": args.translation_x,
                                      "y": args.translation_y
                                  },
                                  shear=args.shear,
                                  cval=args.aug_bg_color,
                                  name="rand_affine"),
            ]
        })
    if args.do_resize:
        aug_dict.update({"resize": [augmenters.Resize(size=args.resize_size)]})
    if args.do_crop:
        crop_px = tuple(args.crop_pixel) if args.crop_pixel else None
        crop_pct = tuple(args.crop_percent) if args.crop_percent else None
        aug_dict.update({
            "crop": [
                augmenters.Crop(px=crop_px,
                                percent=crop_pct,
                                sample_independently=args.crop_samp_indp,
                                name="crop"),
            ]
        })
    if args.do_crop_to_fix_size:
        aug_dict.update({
            "crop_to_fix": [
                augmenters.CropToFixedSize(width=args.crop_size[1],
                                           height=args.crop_size[0],
                                           name="crop_to_fix_size"),
            ]
        })
    if args.do_pad_to_fix_size:
        aug_dict.update({
            "pad": [
                augmenters.PadToFixedSize(width=args.padding_size[1],
                                          height=args.padding_size[0],
                                          pad_cval=args.aug_bg_color,
                                          position=args.padding_position,
                                          name="pad_to_fix_size"),
            ]
        })
    if args.do_random_flip:
        aug_dict.update({
            "flip": [
                augmenters.Fliplr(args.h_flip_prob, name="horizontal_flip"),
                augmenters.Flipud(args.v_flip_prob, name="vertical_flip"),
            ]
        })
    # -------------------------------Color and Brightness--------------------------------
    # TODO: consider how to add Sometimes, OneOf #01/02
    if args.do_random_brightness:
        aug_dict.update({
            "brightness": [
                augmenters.ContrastNormalization(args.brightness_vibrator),
                augmenters.Multiply(args.multiplier,
                                    per_channel=args.multiplier_per_channel),
                augmenters.LinearContrast(alpha=args.linear_contrast),
            ]
        })
    if args.do_random_noise:
        aug_dict.update({
            "noise": [
                augmenters.GaussianBlur(sigma=args.gaussian_sigma),
                #augmenters.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
            ]
        })

    # ---------------------------------------Textures----------------------------------------
    # TODO: consider how to add Sometimes, OneOf #02/02

    # ---------------------------Combine Imgaug Process------------------------------
    if args.imgaug_order and type(args.imgaug_order) is list:
        # Remove repeated elements while preserving the user-specified order
        imgaug_order = list(dict.fromkeys(args.imgaug_order))
        try:
            aug_list = [aug_dict[item] for item in imgaug_order]
            aug_list = list(itertools.chain.from_iterable(aug_list))
        except KeyError:
            not_contained = [
                key for key in imgaug_order if key not in aug_dict.keys()
            ]
            print(
                "%s in args.imgaug_order is not contained in the defined sequential: %s"
                % (not_contained, aug_dict.keys()))
            raise KeyError
        if len(imgaug_order) != len(aug_dict):
            not_contained = [
                key for key in aug_dict.keys() if key not in imgaug_order
            ]
            warnings.warn(
                "You did not specify the whole sequential order for imgaug: \n"
                "args.imgaug_order has %s elements while aug_dict has %s elements. \n"
                "The operations left unspecified are: %s \n"
                "omni_torch will append them in random order after the ones "
                "listed in args.imgaug_order."
                % (len(imgaug_order), len(aug_dict), not_contained))
            # random.shuffle() shuffles in place and returns None, so shuffle
            # first and then look up the corresponding augmenters
            random.shuffle(not_contained)
            extra = [aug_dict[key] for key in not_contained]
            seq = aug_list + list(itertools.chain.from_iterable(extra))
        else:
            seq = aug_list
    else:
        if args.imgaug_order == "default":
            seq = []
            for item in default:
                try:
                    seq += aug_dict[item]
                except KeyError:
                    continue
        else:
            # perform random shuffle
            seq = list(itertools.chain.from_iterable(aug_dict.values()))
            random.shuffle(seq)  # shuffle in place; random.shuffle() returns None
    return seq
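
# A minimal, hypothetical usage sketch for prepare_augmentation(): only the
# random-flip augmenter is switched on, so every field of the Namespace below
# is an assumption about what the real argparse arguments look like.
from argparse import Namespace
from imgaug import augmenters

demo_args = Namespace(
    do_affine=False, do_resize=False, do_crop=False,
    do_crop_to_fix_size=False, do_pad_to_fix_size=False,
    do_random_flip=True, h_flip_prob=0.5, v_flip_prob=0.2,
    do_random_brightness=False, do_random_noise=False,
    imgaug_order="default",
)
flip_only_pipeline = augmenters.Sequential(prepare_augmentation(demo_args))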
Example #14
                    iaa.Affine(
                        rotate=(-25, 25),
                    )
                ),
                iaa.Sometimes( (0.1 + lower_bound * 6),
                    iaa.Affine(
                        scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
                    )
                ),
                iaa.Fliplr(1.0), # Horizontal flips
            ], random_order=True) # apply augmenters in random order
            seq_lite1 = iaa.Sequential([  
                iaa.Fliplr(0.5), # Horizontal flips,
            ], random_order=True) # apply augmenters in random order
            seq_lite2 = iaa.Sequential([  
                iaa.Flipud(0.5), # Vertical flips
            ], random_order=True) # apply augmenters in random order

            if data_input_type == 2 :
                images_aug = seq.augment_images(current_batch.astype(np.float32))
                current_batch = np.vstack((current_batch,images_aug)).astype(np.float32)
                current_batch_label = np.vstack((current_batch_label,current_batch_label)).astype(np.float32)
                input_sess_array = [cost_ad,accuracy_ad,correct_prediction_ad,grad_update_ad,concat_input]
                input_feed_dict = {
                    x: current_batch, y: current_batch_label,
                    iter_variable: iter, learning_rate_dynamic: learning_rate,
                    droprate1: random_drop1, droprate2: random_drop2,
                    droprate3: random_drop3, batch_size_dynamic: batch_size}

            elif data_input_type == 0 :
                images_aug = seq.augment_images(current_batch.astype(np.float32))
                current_batch = np.vstack((current_batch,images_aug)).astype(np.float32)
                current_batch_label = np.vstack((current_batch_label,current_batch_label)).astype(np.float32)
                input_sess_array = [cost,accuracy,correct_prediction,grad_update]
Example #15
# prepare the training dataset
dataset_train = DetectorDataset(image_fps_train, image_annotations, ORIG_SIZE,
                                ORIG_SIZE)
dataset_train.prepare()

# prepare the validation dataset
dataset_val = DetectorDataset(image_fps_val, image_annotations, ORIG_SIZE,
                              ORIG_SIZE)
dataset_val.prepare()

#### MODEL
model = modellib.MaskRCNN(mode='training', config=config, model_dir=ROOT_MODEL)

# Image augmentation
augmentation = iaa.Sequential([
    iaa.Sometimes(0.50, iaa.Fliplr(0.5)),
    iaa.Sometimes(0.50, iaa.Flipud(0.5)),
    iaa.Sometimes(0.30, iaa.CoarseSalt(p=0.10, size_percent=0.02)),
    iaa.Sometimes(0.30, iaa.Affine(rotate=(-25, 25))),
    iaa.Sometimes(0.30, iaa.GaussianBlur((0, 3.0)))
])

NUM_EPOCHS = 1
model.train(dataset_train,
            dataset_val,
            learning_rate=config.LEARNING_RATE,
            epochs=NUM_EPOCHS,
            layers='all',
            augmentation=augmentation)
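For completeness, a hedged sketch of how the weights trained above might be loaded for inference, assuming `modellib` is the Matterport Mask R-CNN `mrcnn.model` module; the `InferenceConfig` subclass and the `image` variable are illustrative assumptions, not part of the original example:

class InferenceConfig(config.__class__):
    # Detect one image at a time.
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1

inference_model = modellib.MaskRCNN(mode='inference',
                                    config=InferenceConfig(),
                                    model_dir=ROOT_MODEL)
# find_last() returns the path of the most recently saved checkpoint.
inference_model.load_weights(inference_model.find_last(), by_name=True)

# `image` is assumed to be an HxWx3 uint8 array; detect() returns a list of
# dicts with 'rois', 'class_ids', 'scores' and 'masks'.
results = inference_model.detect([image], verbose=0)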
# region setting
save_model_dir = '/tmp2/wide_angel'
train_type = 'wide_angle'
data_version = 'v4'
csv_train = Path(__file__).parent.parent.absolute().joinpath(
    'datafiles', data_version, 'train.csv')
csv_valid = Path(__file__).parent.parent.absolute().joinpath(
    'datafiles', data_version, 'valid.csv')
csv_test = Path(__file__).parent.parent.absolute().joinpath(
    'datafiles', data_version, 'test.csv')

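# NOTE: the assignment below rebinds `iaa` from the imgaug.augmenters module
# to the Sequential instance, so the module is no longer reachable under that
# name afterwards; a distinct variable name would be safer.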
iaa = iaa.Sequential([
    # iaa.CropAndPad(percent=(-0.04, 0.04)),
    iaa.Fliplr(0.5),
    iaa.Flipud(0.25),
    iaa.GaussianBlur(sigma=(0.0, 0.3)),
    iaa.MultiplyBrightness(mul=(0.7, 1.3)),
    iaa.contrast.LinearContrast((0.7, 1.3)),
    iaa.Sometimes(0.9, iaa.Add((-8, 8))),
    iaa.Sometimes(
        0.9,
        iaa.Affine(
            scale=(0.98, 1.02),
            translate_percent={
                "x": (-0.06, 0.06),
                "y": (-0.06, 0.06)
            },
            rotate=(-15, 15),
        )),
])
Example #17
def get_seq():
    sometimes = lambda aug: iaa.Sometimes(0.5, aug)
    seq = iaa.Sequential(
        [
            # apply the following augmenters to most images
            iaa.Fliplr(0.5),  # horizontally flip 50% of all images
            iaa.Flipud(0.2),  # vertically flip 20% of all images
            sometimes(
                iaa.Affine(
                    scale={
                        "x": (0.9, 1.1),
                        "y": (0.9, 1.1)
                    },  # scale images to 90-110% of their size, individually per axis
                    translate_percent={
                        "x": (-0.1, 0.1),
                        "y": (-0.1, 0.1)
                    },  # translate by -10 to +10 percent (per axis)
                    rotate=(-10, 10),  # rotate by -10 to +10 degrees
                    shear=(-5, 5),  # shear by -5 to +5 degrees
                    order=[0, 1],  # use nearest neighbour or bilinear interpolation (fast)
                    cval=(0, 255),  # if mode is constant, use a cval between 0 and 255
                    mode=ia.ALL  # use any of scikit-image's warping modes
                )),
            # execute 0 to 5 of the following (less important) augmenters per image
            # don't execute all of them, as that would often be way too strong
            iaa.SomeOf(
                (0, 5),
                [
                    sometimes(
                        iaa.Superpixels(p_replace=(0, 1.0),
                                        n_segments=(20, 200))
                    ),  # convert images into their superpixel representation
                    iaa.OneOf([
                        iaa.GaussianBlur(
                            (0, 1.0)),  # blur images with a sigma between 0 and 1.0
                        iaa.AverageBlur(
                            k=(3, 5)
                        ),  # blur image using local means with kernel sizes between 3 and 5
                        iaa.MedianBlur(
                            k=(3, 5)
                        ),  # blur image using local medians with kernel sizes between 3 and 5
                    ]),
                    iaa.Sharpen(alpha=(0, 1.0),
                                lightness=(0.9, 1.1)),  # sharpen images
                    iaa.Emboss(alpha=(0, 1.0),
                               strength=(0, 2.0)),  # emboss images
                    # search either for all edges or for directed edges,
                    # blend the result with the original image using a blobby mask
                    iaa.SimplexNoiseAlpha(
                        iaa.OneOf([
                            iaa.EdgeDetect(alpha=(0.5, 1.0)),
                            iaa.DirectedEdgeDetect(alpha=(0.5, 1.0),
                                                   direction=(0.0, 1.0)),
                        ])),
                    iaa.AdditiveGaussianNoise(
                        loc=0, scale=(0.0, 0.01 * 255),
                        per_channel=0.5),  # add gaussian noise to images
                    iaa.OneOf([
                        iaa.Dropout(
                            (0.01, 0.05), per_channel=0.5
                        ),  # randomly remove up to 5% of the pixels
                        iaa.CoarseDropout((0.01, 0.03),
                                          size_percent=(0.01, 0.02),
                                          per_channel=0.2),
                    ]),
                    iaa.Invert(0.01,
                               per_channel=True),  # invert color channels
                    iaa.Add(
                        (-2, 2), per_channel=0.5
                    ),  # change brightness of images (by -2 to 2 of original value)
                    iaa.AddToHueAndSaturation(
                        (-1, 1)),  # change hue and saturation
                    # either change the brightness of the whole image (sometimes
                    # per channel) or change the brightness of subareas
                    iaa.OneOf([
                        iaa.Multiply((0.9, 1.1), per_channel=0.5),
                        iaa.FrequencyNoiseAlpha(
                            exponent=(-1, 0),
                            first=iaa.Multiply((0.9, 1.1), per_channel=True),
                            second=iaa.ContrastNormalization((0.9, 1.1)))
                    ]),
                    sometimes(
                        iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)
                    ),  # move pixels locally around (with random strengths)
                    sometimes(iaa.PiecewiseAffine(scale=(
                        0.01,
                        0.05))),  # sometimes move parts of the image around
                    sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1)))
                ],
                random_order=True)
        ],
        random_order=True)
    return seq
    for i in range(0, len(imgs)):
        try:
            # plt.imshow(images_aug[i])
            plt.imsave(imgpath[i], images_aug[i])
        except ValueError as e:
            print(e)
            continue
        else:
            continue


seq = iaa.OneOf([
    iaa.Crop(
        px=(0,
            16)),  # crop images from each side by 0 to 16px (randomly chosen)
    iaa.Flipud(1),  #Flip vertically
    iaa.Fliplr(1),  # horizontally flip
    iaa.GaussianBlur(sigma=(0, 3.0)),  # blur images with a sigma of 0 to 3.0
    iaa.ContrastNormalization(
        (0.75, 1.5)),  # Strengthen or weaken the contrast in each image.
    iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05 * 255),
                              per_channel=0.5),  # Add gaussian noise.
    iaa.Multiply(
        (0.8, 1.2),
        per_channel=0.2),  # Make some images brighter and some darker.
    iaa.AdditiveGaussianNoise(scale=0.1 * 255),
    iaa.Affine(translate_px={"x": -40}
               ),  # Augmenter to apply affine transformations to images.
    iaa.Scale({
        "height": 512,
        "width": 512
Example #19
 def __init__(self, p=0.5):
     super(IAAFlipud, self).__init__(1)
     self.processor = iaa.Flipud(p)
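The three lines above are only the constructor of a wrapper class; a minimal self-contained sketch of what such a wrapper might look like is shown below (the original base class is omitted and the `__call__` method is an assumption, not part of the original code):

import imgaug.augmenters as iaa


class IAAFlipud:
    """Minimal stand-in wrapper around iaa.Flipud (base class omitted)."""

    def __init__(self, p=0.5):
        self.processor = iaa.Flipud(p)

    def __call__(self, image):
        # Apply the vertical flip with probability p to a single HxWxC array.
        return self.processor.augment_image(image)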
Example #20
def _load_augmentation_aug_all():
    """ Load image augmentation model """
    def sometimes(aug):
        return iaa.Sometimes(0.5, aug)

    return iaa.Sequential(
        [
            # apply the following augmenters to most images
            iaa.Fliplr(0.5),  # horizontally flip 50% of all images
            iaa.Flipud(0.2),  # vertically flip 20% of all images
            # crop images by -5% to 10% of their height/width
            sometimes(
                iaa.CropAndPad(percent=(-0.05, 0.1),
                               pad_mode='constant',
                               pad_cval=(0, 255))),
            sometimes(
                iaa.Affine(
                    # scale images to 80-120% of their size, individually per axis
                    scale={
                        "x": (0.8, 1.2),
                        "y": (0.8, 1.2)
                    },
                    # translate by -20 to +20 percent (per axis)
                    translate_percent={
                        "x": (-0.2, 0.2),
                        "y": (-0.2, 0.2)
                    },
                    rotate=(-45, 45),  # rotate by -45 to +45 degrees
                    shear=(-16, 16),  # shear by -16 to +16 degrees
                    # use nearest neighbour or bilinear interpolation (fast)
                    order=[0, 1],
                    # if mode is constant, use a cval between 0 and 255
                    cval=(0, 255),
                    # use any of scikit-image's warping modes
                    # (see 2nd image from the top for examples)
                    mode='constant')),
            # execute 0 to 5 of the following (less important) augmenters per
            # image don't execute all of them, as that would often be way too
            # strong
            iaa.SomeOf(
                (0, 5),
                [
                    # convert images into their superpixel representation
                    sometimes(
                        iaa.Superpixels(p_replace=(0, 1.0),
                                        n_segments=(20, 200))),
                    iaa.OneOf([
                        # blur images with a sigma between 0 and 3.0
                        iaa.GaussianBlur((0, 3.0)),
                        # blur image using local means with kernel sizes
                        # between 2 and 7
                        iaa.AverageBlur(k=(2, 7)),
                        # blur image using local medians with kernel sizes
                        # between 3 and 11
                        iaa.MedianBlur(k=(3, 11)),
                    ]),
                    iaa.Sharpen(alpha=(0, 1.0),
                                lightness=(0.75, 1.5)),  # sharpen images
                    iaa.Emboss(alpha=(0, 1.0),
                               strength=(0, 2.0)),  # emboss images
                    # search either for all edges or for directed edges,
                    # blend the result with the original image using a blobby mask
                    iaa.BlendAlphaSimplexNoise(
                        iaa.OneOf([
                            iaa.EdgeDetect(alpha=(0.5, 1.0)),
                            iaa.DirectedEdgeDetect(alpha=(0.5, 1.0),
                                                   direction=(0.0, 1.0)),
                        ])),
                    # add gaussian noise to images
                    iaa.AdditiveGaussianNoise(
                        loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
                    iaa.OneOf([
                        # randomly remove up to 10% of the pixels
                        iaa.Dropout((0.01, 0.1), per_channel=0.5),
                        iaa.CoarseDropout((0.03, 0.15),
                                          size_percent=(0.02, 0.05),
                                          per_channel=0.2),
                    ]),
                    # invert color channels
                    iaa.Invert(0.05, per_channel=True),
                    # change brightness of images (by -10 to 10 of original value)
                    iaa.Add((-10, 10), per_channel=0.5),
                    # change hue and saturation
                    iaa.AddToHueAndSaturation((-20, 20)),
                    # either change the brightness of the whole image (sometimes
                    # per channel) or change the brightness of subareas
                    iaa.OneOf([
                        iaa.Multiply((0.5, 1.5), per_channel=0.5),
                        iaa.BlendAlphaFrequencyNoise(
                            exponent=(-4, 0),
                            foreground=iaa.Multiply(
                                (0.5, 1.5), per_channel=True),
                            background=iaa.contrast.LinearContrast((0.5, 2.0)))
                    ]),
                    # improve or worsen the contrast
                    iaa.contrast.LinearContrast((0.5, 2.0), per_channel=0.5),
                    iaa.Grayscale(alpha=(0.0, 1.0)),
                    # move pixels locally around (with random strengths)
                    sometimes(
                        iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)
                    ),
                    # sometimes move parts of the image around
                    sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))),
                    sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1)))
                ],
                random_order=True)
        ],
        random_order=True)
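A short usage sketch for the pipeline returned by `_load_augmentation_aug_all`; the random dummy batch below is only illustrative, any HxWx3 uint8 batch works:

import numpy as np

# Build the pipeline once and reuse it across batches.
aug = _load_augmentation_aug_all()

# A dummy batch of four 128x128 RGB images.
images = np.random.randint(0, 256, size=(4, 128, 128, 3), dtype=np.uint8)

# Each call samples new random parameters per image; shapes are preserved.
images_aug = aug.augment_images(images)
assert images_aug.shape == images.shape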
Example #21
def test_unusual_channel_numbers():
    reseed()

    images = [
        (0, create_random_images((4, 16, 16))),
        (1, create_random_images((4, 16, 16, 1))),
        (2, create_random_images((4, 16, 16, 2))),
        (4, create_random_images((4, 16, 16, 4))),
        (5, create_random_images((4, 16, 16, 5))),
        (10, create_random_images((4, 16, 16, 10))),
        (20, create_random_images((4, 16, 16, 20)))
    ]

    augs = [
        iaa.Add((-5, 5), name="Add"),
        iaa.AddElementwise((-5, 5), name="AddElementwise"),
        iaa.AdditiveGaussianNoise(0.01*255, name="AdditiveGaussianNoise"),
        iaa.Multiply((0.95, 1.05), name="Multiply"),
        iaa.Dropout(0.01, name="Dropout"),
        iaa.CoarseDropout(0.01, size_px=6, name="CoarseDropout"),
        iaa.Invert(0.01, per_channel=True, name="Invert"),
        iaa.ContrastNormalization((0.95, 1.05), name="ContrastNormalization"),
        iaa.GaussianBlur(sigma=(0.95, 1.05), name="GaussianBlur"),
        iaa.AverageBlur((3, 5), name="AverageBlur"),
        iaa.MedianBlur((3, 5), name="MedianBlur"),
        # iaa.BilateralBlur((3, 5), name="BilateralBlur"), # works only with 3/RGB channels
        # WithColorspace ?
        # iaa.AddToHueAndSaturation((-5, 5), name="AddToHueAndSaturation"), # works only with 3/RGB channels
        # ChangeColorspace ?
        # iaa.Grayscale((0.0, 0.1), name="Grayscale"), # works only with 3 channels
        # Convolve ?
        iaa.Sharpen((0.0, 0.1), lightness=(1.0, 1.2), name="Sharpen"),
        iaa.Emboss(alpha=(0.0, 0.1), strength=(0.5, 1.5), name="Emboss"),
        iaa.EdgeDetect(alpha=(0.0, 0.1), name="EdgeDetect"),
        iaa.DirectedEdgeDetect(alpha=(0.0, 0.1), direction=0,
                               name="DirectedEdgeDetect"),
        iaa.Fliplr(0.5, name="Fliplr"),
        iaa.Flipud(0.5, name="Flipud"),
        iaa.Affine(translate_px=(-5, 5), name="Affine-translate-px"),
        iaa.Affine(translate_percent=(-0.05, 0.05), name="Affine-translate-percent"),
        iaa.Affine(rotate=(-20, 20), name="Affine-rotate"),
        iaa.Affine(shear=(-20, 20), name="Affine-shear"),
        iaa.Affine(scale=(0.9, 1.1), name="Affine-scale"),
        iaa.PiecewiseAffine(scale=(0.001, 0.005), name="PiecewiseAffine"),
        iaa.PerspectiveTransform(scale=(0.01, 0.10), name="PerspectiveTransform"),
        iaa.ElasticTransformation(alpha=(0.1, 0.2), sigma=(0.1, 0.2),
                                  name="ElasticTransformation"),
        iaa.Sequential([iaa.Add((-5, 5)), iaa.AddElementwise((-5, 5))]),
        iaa.SomeOf(1, [iaa.Add((-5, 5)), iaa.AddElementwise((-5, 5))]),
        iaa.OneOf([iaa.Add((-5, 5)), iaa.AddElementwise((-5, 5))]),
        iaa.Sometimes(0.5, iaa.Add((-5, 5)), name="Sometimes"),
        # WithChannels
        iaa.Noop(name="Noop"),
        # Lambda
        # AssertLambda
        # AssertShape
        iaa.Alpha((0.0, 0.1), iaa.Add(10), name="Alpha"),
        iaa.AlphaElementwise((0.0, 0.1), iaa.Add(10), name="AlphaElementwise"),
        iaa.SimplexNoiseAlpha(iaa.Add(10), name="SimplexNoiseAlpha"),
        iaa.FrequencyNoiseAlpha(exponent=(-2, 2), first=iaa.Add(10),
                                name="SimplexNoiseAlpha"),
        iaa.Superpixels(p_replace=0.01, n_segments=64),
        iaa.Resize({"height": 4, "width": 4}, name="Resize"),
        iaa.CropAndPad(px=(-10, 10), name="CropAndPad"),
        iaa.Pad(px=(0, 10), name="Pad"),
        iaa.Crop(px=(0, 10), name="Crop")
    ]

    for aug in augs:
        for (nb_channels, images_c) in images:
            if aug.name != "Resize":
                images_aug = aug.augment_images(images_c)
                assert images_aug.shape == images_c.shape
                image_aug = aug.augment_image(images_c[0])
                assert image_aug.shape == images_c[0].shape
            else:
                images_aug = aug.augment_images(images_c)
                image_aug = aug.augment_image(images_c[0])
                if images_c.ndim == 3:
                    assert images_aug.shape == (4, 4, 4)
                    assert image_aug.shape == (4, 4)
                else:
                    assert images_aug.shape == (4, 4, 4, images_c.shape[3])
                    assert image_aug.shape == (4, 4, images_c.shape[3])
Example #22
            class_vec_val = class_vec_map[label]
            # classes_seen.append(class_val)
            im = np.asarray(Image.open(imgs_folder + "processed/" +
                                       str(i))).astype(np.uint8)
            yield [im, class_val, class_vec_val]


# Define our sequence of augmentation steps that will be applied to every image
# All augmenters with per_channel=0.5 will sample one value _per image_
# in 50% of all cases. In all other cases they will sample new values
# _per channel_.
seq = iaa.Sequential(
    [
        # apply the following augmenters to most images
        iaa.Fliplr(0.5),  # horizontally flip 50% of all images
        iaa.Flipud(0.2),  # vertically flip 20% of all images
        # crop images by -5% to 10% of their height/width
        # sometimes(iaa.CropAndPad(
        # 	percent=(-0.05, 0.1),
        # 	pad_mode=ia.ALL,
        # 	pad_cval=(0, 255)
        # )),
        sometimes(
            iaa.Affine(
                translate_percent={
                    "x": (-0.2, 0.2),
                    "y": (-0.2, 0.2)
                },  # translate by -20 to +20 percent (per axis)
                rotate=(-90, 90),  # rotate by -90 to +90 degrees
                shear=(-10, 10),  # shear by -10 to +10 degrees
                # order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
Example #23
def test_dtype_preservation():
    reseed()

    size = (4, 16, 16, 3)
    images = [
        np.random.uniform(0, 255, size).astype(np.uint8),
        np.random.uniform(0, 65535, size).astype(np.uint16),
        np.random.uniform(0, 4294967295, size).astype(np.uint32),
        np.random.uniform(-128, 127, size).astype(np.int16),
        np.random.uniform(-32768, 32767, size).astype(np.int32),
        np.random.uniform(0.0, 1.0, size).astype(np.float32),
        np.random.uniform(-1000.0, 1000.0, size).astype(np.float16),
        np.random.uniform(-1000.0, 1000.0, size).astype(np.float32),
        np.random.uniform(-1000.0, 1000.0, size).astype(np.float64)
    ]

    default_dtypes = set([arr.dtype for arr in images])
    # Some dtypes are here removed per augmenter, because the respective
    # augmenter does not support them. This test currently only checks whether
    # dtypes are preserved from in- to output for all dtypes that are supported
    # per augmenter.
    # dtypes are here removed via list comprehension instead of
    # `default_dtypes - set([dtype])`, because the latter one simply never
    # removed the dtype(s) for some reason?!

    def _not_dts(dts):
        return [dt for dt in default_dtypes if dt not in dts]

    augs = [
        (iaa.Add((-5, 5), name="Add"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.AddElementwise((-5, 5), name="AddElementwise"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.AdditiveGaussianNoise(0.01*255, name="AdditiveGaussianNoise"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.Multiply((0.95, 1.05), name="Multiply"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.Dropout(0.01, name="Dropout"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.CoarseDropout(0.01, size_px=6, name="CoarseDropout"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.Invert(0.01, per_channel=True, name="Invert"), default_dtypes),
        (iaa.ContrastNormalization((0.95, 1.05), name="ContrastNormalization"), default_dtypes),
        (iaa.GaussianBlur(sigma=(0.95, 1.05), name="GaussianBlur"), _not_dts([np.float16])),
        (iaa.AverageBlur((3, 5), name="AverageBlur"), _not_dts([np.uint32, np.int32, np.float16])),
        (iaa.MedianBlur((3, 5), name="MedianBlur"), _not_dts([np.uint32, np.int32, np.float16, np.float64])),
        (iaa.BilateralBlur((3, 5), name="BilateralBlur"),
         _not_dts([np.uint16, np.uint32, np.int16, np.int32, np.float16, np.float64])),
        # WithColorspace ?
        # iaa.AddToHueAndSaturation((-5, 5), name="AddToHueAndSaturation"), # works only with RGB/uint8
        # ChangeColorspace ?
        # iaa.Grayscale((0.0, 0.1), name="Grayscale"), # works only with RGB/uint8
        # Convolve ?
        (iaa.Sharpen((0.0, 0.1), lightness=(1.0, 1.2), name="Sharpen"),
         _not_dts([np.uint32, np.int32, np.float16])),
        (iaa.Emboss(alpha=(0.0, 0.1), strength=(0.5, 1.5), name="Emboss"),
         _not_dts([np.uint32, np.int32, np.float16])),
        (iaa.EdgeDetect(alpha=(0.0, 0.1), name="EdgeDetect"),
         _not_dts([np.uint32, np.int32, np.float16])),
        (iaa.DirectedEdgeDetect(alpha=(0.0, 0.1), direction=0, name="DirectedEdgeDetect"),
         _not_dts([np.uint32, np.int32, np.float16])),
        (iaa.Fliplr(0.5, name="Fliplr"), default_dtypes),
        (iaa.Flipud(0.5, name="Flipud"), default_dtypes),
        (iaa.Affine(translate_px=(-5, 5), name="Affine-translate-px"), _not_dts([np.uint32, np.int32])),
        (iaa.Affine(translate_percent=(-0.05, 0.05), name="Affine-translate-percent"), _not_dts([np.uint32, np.int32])),
        (iaa.Affine(rotate=(-20, 20), name="Affine-rotate"), _not_dts([np.uint32, np.int32])),
        (iaa.Affine(shear=(-20, 20), name="Affine-shear"), _not_dts([np.uint32, np.int32])),
        (iaa.Affine(scale=(0.9, 1.1), name="Affine-scale"), _not_dts([np.uint32, np.int32])),
        (iaa.PiecewiseAffine(scale=(0.001, 0.005), name="PiecewiseAffine"), default_dtypes),
        # (iaa.PerspectiveTransform(scale=(0.01, 0.10), name="PerspectiveTransform"), not_dts([np.uint32])),
        (iaa.ElasticTransformation(alpha=(0.1, 0.2), sigma=(0.1, 0.2), name="ElasticTransformation"),
         _not_dts([np.float16])),
        (iaa.Sequential([iaa.Noop(), iaa.Noop()], name="SequentialNoop"), default_dtypes),
        (iaa.SomeOf(1, [iaa.Noop(), iaa.Noop()], name="SomeOfNoop"), default_dtypes),
        (iaa.OneOf([iaa.Noop(), iaa.Noop()], name="OneOfNoop"), default_dtypes),
        (iaa.Sometimes(0.5, iaa.Noop(), name="SometimesNoop"), default_dtypes),
        (iaa.Sequential([iaa.Add((-5, 5)), iaa.AddElementwise((-5, 5))], name="Sequential"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.SomeOf(1, [iaa.Add((-5, 5)), iaa.AddElementwise((-5, 5))], name="SomeOf"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.OneOf([iaa.Add((-5, 5)), iaa.AddElementwise((-5, 5))], name="OneOf"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.Sometimes(0.5, iaa.Add((-5, 5)), name="Sometimes"), _not_dts([np.uint32, np.int32, np.float64])),
        # WithChannels
        (iaa.Noop(name="Noop"), default_dtypes),
        # Lambda
        # AssertLambda
        # AssertShape
        (iaa.Alpha((0.0, 0.1), iaa.Noop(), name="AlphaNoop"), default_dtypes),
        (iaa.AlphaElementwise((0.0, 0.1), iaa.Noop(), name="AlphaElementwiseNoop"), default_dtypes),
        (iaa.SimplexNoiseAlpha(iaa.Noop(), name="SimplexNoiseAlphaNoop"), default_dtypes),
        (iaa.FrequencyNoiseAlpha(exponent=(-2, 2), first=iaa.Noop(), name="FrequencyNoiseAlphaNoop"), default_dtypes),
        (iaa.Alpha((0.0, 0.1), iaa.Add(10), name="Alpha"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.AlphaElementwise((0.0, 0.1), iaa.Add(10), name="AlphaElementwise"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.SimplexNoiseAlpha(iaa.Add(10), name="SimplexNoiseAlpha"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.FrequencyNoiseAlpha(exponent=(-2, 2), first=iaa.Add(10), name="FrequencyNoiseAlpha"), _not_dts([np.uint32, np.int32, np.float64])),
        (iaa.Superpixels(p_replace=0.01, n_segments=64), _not_dts([np.float16, np.float32, np.float64])),
        (iaa.Resize({"height": 4, "width": 4}, name="Resize"),
         _not_dts([np.uint16, np.uint32, np.int16, np.int32, np.float32, np.float16, np.float64])),
        (iaa.CropAndPad(px=(-10, 10), name="CropAndPad"),
         _not_dts([np.uint16, np.uint32, np.int16, np.int32, np.float32, np.float16, np.float64])),
        (iaa.Pad(px=(0, 10), name="Pad"),
         _not_dts([np.uint16, np.uint32, np.int16, np.int32, np.float32, np.float16, np.float64])),
        (iaa.Crop(px=(0, 10), name="Crop"),
         _not_dts([np.uint16, np.uint32, np.int16, np.int32, np.float32, np.float16, np.float64]))
    ]

    for (aug, allowed_dtypes) in augs:
        # print("aug", aug.name)
        # print("allowed_dtypes", allowed_dtypes)
        for images_i in images:
            if images_i.dtype in allowed_dtypes:
                # print("image dt", images_i.dtype)
                images_aug = aug.augment_images(images_i)
                assert images_aug.dtype == images_i.dtype
            else:
                # print("image dt", images_i.dtype, "[SKIPPED]")
                pass
Example #24
 def processor(self):
     return iaa.Flipud(1)
Example #25
def do_augmentation(D):
    """ D : Nx(n+p+1)xHxWx3. Return N1x(n+p+1)xHxWx3 """

    n_samples = D.shape[0]
    n_images_per_sample = D.shape[1]

    im_rows = D.shape[2]
    im_cols = D.shape[3]
    im_chnl = D.shape[4]

    E = D.reshape(n_samples * n_images_per_sample, im_rows, im_cols, im_chnl)

    sometimes = lambda aug: iaa.Sometimes(0.5, aug)

    # Very basic
    if True:
        seq = iaa.Sequential([
            sometimes(iaa.Crop(px=(
                0, 50
            ))),  # crop images from each side by 0 to 50px (randomly chosen)
            # iaa.Fliplr(0.5), # horizontally flip 50% of the images
            iaa.GaussianBlur(sigma=(0, 3.0)
                             ),  # blur images with a sigma of 0 to 3.0
            sometimes(
                iaa.Affine(scale={
                    "x": (0.8, 1.2),
                    "y": (0.8, 1.2)
                },
                           translate_percent={
                               "x": (-0.2, 0.2),
                               "y": (-0.2, 0.2)
                           },
                           rotate=(-25, 25),
                           shear=(-8, 8)))
        ])
        seq_vbasic = seq

    # Sometimes(0.5, ...) applies the given augmenter in 50% of all cases,
    # e.g. Sometimes(0.5, GaussianBlur(0.3)) would blur roughly every second image.

    # Typical
    if True:
        seq = iaa.Sequential(
            [
                iaa.Fliplr(0.5),  # horizontal flips
                iaa.Crop(percent=(0, 0.1)),  # random crops
                # Small gaussian blur with random sigma between 0 and 0.5.
                # But we only blur about 50% of all images.
                iaa.Sometimes(0.5, iaa.GaussianBlur(sigma=(0, 0.5))),
                # Strengthen or weaken the contrast in each image.
                iaa.ContrastNormalization((0.75, 1.5)),
                # Add gaussian noise.
                # For 50% of all images, we sample the noise once per pixel.
                # For the other 50% of all images, we sample the noise per pixel AND
                # channel. This can change the color (not only brightness) of the
                # pixels.
                iaa.AdditiveGaussianNoise(
                    loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
                # Make some images brighter and some darker.
                # In 20% of all cases, we sample the multiplier once per channel,
                # which can end up changing the color of the images.
                iaa.Multiply((0.8, 1.2), per_channel=0.2),
                # Apply affine transformations to each image.
                # Scale/zoom them, translate/move them, rotate them and shear them.
                iaa.Affine(scale={
                    "x": (0.8, 1.2),
                    "y": (0.8, 1.2)
                },
                           translate_percent={
                               "x": (-0.2, 0.2),
                               "y": (-0.2, 0.2)
                           },
                           rotate=(-25, 25),
                           shear=(-8, 8))
            ],
            random_order=True)  # apply augmenters in random order
        # seq = sometimes( seq )
        seq_typical = seq

    # Heavy
    if True:
        # Define our sequence of augmentation steps that will be applied to every image
        # All augmenters with per_channel=0.5 will sample one value _per image_
        # in 50% of all cases. In all other cases they will sample new values
        # _per channel_.
        seq = iaa.Sequential(
            [
                # apply the following augmenters to most images
                iaa.Fliplr(0.2),  # horizontally flip 20% of all images
                iaa.Flipud(0.2),  # vertically flip 20% of all images
                # crop images by -5% to 10% of their height/width
                sometimes(
                    iaa.CropAndPad(percent=(-0.05, 0.1),
                                   pad_mode=ia.ALL,
                                   pad_cval=(0, 255))),
                sometimes(
                    iaa.Affine(
                        scale={
                            "x": (0.8, 1.2),
                            "y": (0.8, 1.2)
                        },  # scale images to 80-120% of their size, individually per axis
                        translate_percent={
                            "x": (-0.2, 0.2),
                            "y": (-0.2, 0.2)
                        },  # translate by -20 to +20 percent (per axis)
                        rotate=(-45, 45),  # rotate by -45 to +45 degrees
                        shear=(-16, 16),  # shear by -16 to +16 degrees
                        order=[
                            0,
                            1
                        ],  # use nearest neighbour or bilinear interpolation (fast)
                        cval=(
                            0, 255
                        ),  # if mode is constant, use a cval between 0 and 255
                        mode=ia.
                        ALL  # use any of scikit-image's warping modes (see 2nd image from the top for examples)
                    )),
                # execute 0 to 5 of the following (less important) augmenters per image
                # don't execute all of them, as that would often be way too strong
                iaa.SomeOf(
                    (0, 5),
                    [
                        sometimes(
                            iaa.Superpixels(p_replace=(0, 1.0),
                                            n_segments=(20, 200))
                        ),  # convert images into their superpixel representation
                        iaa.OneOf([
                            iaa.GaussianBlur(
                                (0, 3.0)
                            ),  # blur images with a sigma between 0 and 3.0
                            iaa.AverageBlur(
                                k=(2, 7)
                            ),  # blur image using local means with kernel sizes between 2 and 7
                            #iaa.MedianBlur(k=(3, 11)), # blur image using local medians with kernel sizes between 2 and 7
                        ]),
                        iaa.Sharpen(alpha=(0, 1.0),
                                    lightness=(0.75, 1.5)),  # sharpen images
                        iaa.Emboss(alpha=(0, 1.0),
                                   strength=(0, 2.0)),  # emboss images
                        # search either for all edges or for directed edges,
                        # blend the result with the original image using a blobby mask
                        iaa.SimplexNoiseAlpha(
                            iaa.OneOf([
                                iaa.EdgeDetect(alpha=(0.5, 1.0)),
                                iaa.DirectedEdgeDetect(alpha=(0.5, 1.0),
                                                       direction=(0.0, 1.0)),
                            ])),
                        iaa.AdditiveGaussianNoise(
                            loc=0, scale=(0.0, 0.05 * 255),
                            per_channel=0.5),  # add gaussian noise to images
                        iaa.OneOf([
                            iaa.Dropout(
                                (0.01, 0.1), per_channel=0.5
                            ),  # randomly remove up to 10% of the pixels
                            iaa.CoarseDropout((0.03, 0.15),
                                              size_percent=(0.02, 0.05),
                                              per_channel=0.2),
                        ]),
                        iaa.Invert(0.05,
                                   per_channel=True),  # invert color channels
                        iaa.Add(
                            (-10, 10), per_channel=0.5
                        ),  # change brightness of images (by -10 to 10 of original value)
                        iaa.AddToHueAndSaturation(
                            (-20, 20)),  # change hue and saturation
                        # either change the brightness of the whole image (sometimes
                        # per channel) or change the brightness of subareas
                        iaa.OneOf([
                            iaa.Multiply((0.5, 1.5), per_channel=0.5),
                            iaa.FrequencyNoiseAlpha(
                                exponent=(-4, 0),
                                first=iaa.Multiply(
                                    (0.5, 1.5), per_channel=True),
                                second=iaa.ContrastNormalization((0.5, 2.0)))
                        ]),
                        iaa.ContrastNormalization(
                            (0.5, 2.0),
                            per_channel=0.5),  # improve or worsen the contrast
                        iaa.Grayscale(alpha=(0.0, 1.0)),
                        sometimes(
                            iaa.ElasticTransformation(alpha=(0.5, 3.5),
                                                      sigma=0.25)
                        ),  # move pixels locally around (with random strengths)
                        sometimes(
                            iaa.PiecewiseAffine(scale=(0.01, 0.05))
                        ),  # sometimes move parts of the image around
                        sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1)))
                    ],
                    random_order=True)
            ],
            random_order=True)
        seq_heavy = seq

    print('Add data')
    L = [E]
    print('seq_vbasic')
    L.append(seq_vbasic.augment_images(E))
    print('seq_typical')
    L.append(seq_typical.augment_images(E))
    print('seq_typical (second pass)')
    L.append(seq_typical.augment_images(E))
    print('seq_heavy')
    L.append(seq_heavy.augment_images(E))

    G = [
        l.reshape(n_samples, n_images_per_sample, im_rows, im_cols, im_chnl)
        for l in L
    ]
    G = np.concatenate(G)
    print('Input.shape ', D.shape, '\tOutput.shape ', G.shape)
    return G

    # for j in range(n_times):
    #     images_aug = seq.augment_images(E)
    #     # L.append( images_aug.reshape( n_samples, n_images_per_sample, im_rows,im_cols,im_chnl  ) )
    #     L.append( images_aug )

    # code.interact( local=locals() )
    return L
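A small usage sketch for `do_augmentation`, assuming a dummy uint8 input of the documented Nx(n+p+1)xHxWx3 shape; the sizes below are illustrative:

import numpy as np

# Dummy input: 2 samples, each a stack of 5 RGB frames of size 64x64.
D = np.random.randint(0, 256, size=(2, 5, 64, 64, 3), dtype=np.uint8)

# The returned array stacks the original data with four augmented copies,
# so the first axis grows by a factor of five.
G = do_augmentation(D)
print('G.shape =', G.shape)  # expected: (10, 5, 64, 64, 3)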
Example #26
import random

from skimage.transform import resize

#import tensorflow as tf

from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img

from PIL import Image

# imgaug import needed by the augmenter definitions below
import imgaug.augmenters as iaa

SEED_MAX = 1000
augments_dict = {
    "Flip-Horizontal":
    iaa.Fliplr(1.0),  #OK
    "Flip-Vertical":
    iaa.Flipud(1.0),  #OK
    "Blur":
    iaa.GaussianBlur(sigma=(0.5, 1.0)),  #OK
    "Contrast":
    iaa.LinearContrast((0.5, 1.0)),  # OK; tuple so alpha is sampled from [0.5, 1.0]
    "Noise":
    iaa.AdditiveGaussianNoise(loc=0, scale=(0.1, 0.05 * 255),
                              per_channel=True),  #OK
    "Colour-Grade":
    iaa.Multiply((0.8, 1.2), per_channel=True),
    "Rotate":
    iaa.Affine(rotate=(-30, 30), cval=255, mode="constant"
               ),  # rotate the image randomly between -30 and 30 degrees
    "Shear":
    iaa.Affine(shear=(-20, 20), cval=255,
               mode="constant"),  # shear the image between -45 and 45 degrees
Example #27
def CustomImageDataGen(input_img):
    # Sometimes(0.5, ...) applies the given augmenter in 50% of all cases,
    # e.g. Sometimes(0.5, GaussianBlur(0.3)) would blur roughly every second
    # image.
    def sometimes(aug):
        return iaa.Sometimes(0.5, aug)

    seq = iaa.Sequential(
        [
            iaa.Fliplr(0.5),  # horizontal flips
            iaa.Flipud(0.2),  # vertical flips

            # Small gaussian blur with random sigma between 0 and 0.5.
            # But we only blur about 50% of all images.
            sometimes(iaa.GaussianBlur(sigma=(0, 2.0))),

            # crop images by -10% to 20% of their height/width
            sometimes(
                iaa.CropAndPad(
                    percent=(-0.1, 0.2), pad_mode=ia.ALL, pad_cval=(0, 255))),

            # Apply affine transformations to some of the images
            # - scale to 80-150% of image height/width (each axis independently)
            # - translate by -20 to +20 relative to height/width (per axis)
            # - rotate by -45 to +45 degrees
            # - shear by -16 to +16 degrees
            # - order: use nearest neighbour or bilinear interpolation (fast)
            # - mode: use any available mode to fill newly created pixels
            #         see API or scikit-image for which modes are available
            # - cval: if the mode is constant, then use a random brightness
            #         for the newly created pixels (e.g. sometimes black,
            #         sometimes white)
            sometimes(
                iaa.Affine(scale={
                    "x": (0.8, 1.5),
                    "y": (0.8, 1.5)
                },
                           translate_percent={
                               "x": (-0.2, 0.2),
                               "y": (-0.2, 0.2)
                           },
                           rotate=(-45, 45),
                           shear=(-16, 16),
                           order=[0, 1],
                           cval=(0, 255),
                           mode=ia.ALL)),

            # coarsely drop 3-15% of all pixels in rectangles that are 2-5% of
            # the original image size, leading to large dropped regions.
            sometimes(
                iaa.CoarseDropout(
                    (0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2)),

            # Make some images brighter and some darker.
            # In 20% of all cases, we sample the multiplier once per channel,
            # which can end up changing the color of the images.
            sometimes(iaa.Multiply((0.8, 1.2), per_channel=0.2)),

            # Improve or worsen the contrast of images.
            # Comment it out after third model run (extreme saturation)
            sometimes(iaa.ContrastNormalization((0.75, 1.5), per_channel=0.5)),
        ],
        # do all of the above augmentations in random order
        random_order=True)  # apply augmenters in random order

    output_img = seq.augment_image(input_img)
    return output_img
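A minimal usage sketch for `CustomImageDataGen`; the random dummy image is an assumption, any HxWx3 uint8 array would do:

import numpy as np

# A dummy 256x256 RGB image.
img = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)

# Each call re-samples the augmentation parameters, so repeated calls on the
# same input yield different outputs; the output shape matches the input.
augmented = CustomImageDataGen(img)
assert augmented.shape == img.shape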
Example #28
def gen_random_augment_with_mask():
    print("Generating random sequence for originals...")
    # generate a random seed
    global SEED_MAX
    random_seed = np.random.randint(low=1, high=SEED_MAX)
    ia.seed(random_seed)

    seq_img = iaa.Sequential(
        [
            iaa.Fliplr(0.5, random_state=1),  # horizontal flips
            iaa.Flipud(0.5, random_state=2),  # vertical flips
            iaa.Crop(percent=(0, 0.1), random_state=3),  # random crops
            # Small gaussian blur with random sigma between 0 and 0.5.
            # But we only blur about 50% of all images.
            iaa.Sometimes(0.5, iaa.GaussianBlur(sigma=(0, 0.5))),

            # Strengthen or weaken the contrast in each image.
            iaa.contrast.LinearContrast((0.5, 1.25), random_state=4),
            # Add gaussian noise for 50% of the images.
            # For 50% of all images, we sample the noise once per pixel.
            # For the other 50% of all images, we sample the noise per pixel AND
            # channel. This can change the color (not only brightness) of the
            # pixels.
            iaa.Sometimes(0.5,
                          iaa.AdditiveGaussianNoise(
                              loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5),
                          random_state=5),
            # Make some images brighter and some darker.
            # In 20% of all cases, we sample the multiplier once per channel,
            # which can end up changing the color of the images.
            iaa.Multiply((0.8, 1.2), per_channel=0.2, random_state=6),
            # Apply affine transformations to each image.
            # Scale/zoom them, translate/move them, rotate them and shear them.
            iaa.Affine(
                scale={
                    "x": (0.8, 1.2),
                    "y": (0.8, 1.2)
                },  # random scaling from 80% to 120% of original size
                translate_percent={
                    "x": (0.0, 0.2),
                    "y": (0.0, 0.2)
                },  # translate by 0 to 20% of the axis size
                rotate=(
                    -30, 30
                ),  # rotate the image randomly between -30 and 30 degrees
                shear=(-20, 20),
                cval=255,  # set cval to 255 to prevent any black areas occurring
                mode='constant',
                random_state=7)
        ],
        random_state=8)  # fixed random state so the geometric augmenters stay in sync with the mask sequence below

    print("image aug sequence generated")

    print("Generating random sequence for masks...")
    seq_mask = iaa.Sequential(
        [
            iaa.Fliplr(0.5, random_state=1),
            iaa.Flipud(0.5, random_state=2),
            iaa.Crop(percent=(0, 0.1), random_state=3),
            iaa.Affine(
                scale={
                    "x": (0.8, 1.2),
                    "y": (0.8, 1.2)
                },  # random scaling from 80% to 120% of original size
                translate_percent={
                    "x": (0.0, 0.2),
                    "y": (0.0, 0.2)
                },  # translate by 0 to 20% of the axis size
                rotate=(
                    -30, 30
                ),  # rotate the image randomly between -30 and 30 degrees
                shear=(-20, 20),
                cval=255,  # set cval to 255 to prevent any black areas occurring
                mode='constant',
                random_state=7)
        ],
        random_state=8)

    print("masks aug sequence generated")

    return seq_img, seq_mask
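A hedged sketch of how the two returned sequences might be applied so that image and mask receive the same geometric transform; the array shapes below are assumptions, and the alignment relies on the matching per-augmenter `random_state` values set up above:

import numpy as np

image = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)
mask = np.zeros((256, 256, 1), dtype=np.uint8)

# Both sequences were built with matching random_state values for the shared
# (geometric) augmenters, so applying them separately should keep the image
# and its mask spatially aligned.
seq_img, seq_mask = gen_random_augment_with_mask()
image_aug = seq_img.augment_image(image)
mask_aug = seq_mask.augment_image(mask)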
Example #29
 def __init__(self):
     self.imgaug_transform = iaa.Flipud(p=1)
     self.augmentor_op = Operations.Flip(probability=1,
                                         top_bottom_left_right="TOP_BOTTOM")
     self.solt_stream = slc.Stream([slt.Flip(p=1, axis=0)])
Example #30
 def create_aug(self, *args, **kwargs):
     return iaa.Flipud(*args, **kwargs)