import imgaug.augmenters as iaa
from imgaug.augmenters import Sometimes


def get_image_augmenter():
    """
    :return: (iaa.Sequential) Image Augmenter
    """
    return iaa.Sequential(
        [
            # TODO: if flipped, reconstruct the flipped one
            # Sometimes(0.5, iaa.Fliplr(1)),
            # TODO: add shadows, see: https://markku.ai/post/data-augmentation/
            Sometimes(0.5, iaa.GaussianBlur(sigma=(0, 2.0))),
            Sometimes(0.5, iaa.MotionBlur(k=(3, 11), angle=(0, 360))),
            Sometimes(0.5, iaa.Sharpen(alpha=(0.0, 1.0),
                                       lightness=(0.75, 2.0))),
            Sometimes(0.4, iaa.Add((-15, 15), per_channel=0.5)),
            Sometimes(0.5, iaa.Multiply((0.6, 1.4), per_channel=0.5)),
            Sometimes(
                0.2,
                iaa.CoarseDropout(
                    (0.0, 0.05), size_percent=(0.02, 0.10), per_channel=0.5)),
            Sometimes(0.5,
                      iaa.ContrastNormalization((0.5, 1.8), per_channel=0.5)),
            Sometimes(0.1, iaa.AdditiveGaussianNoise(scale=10,
                                                     per_channel=True))
        ],
        random_order=True)
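A minimal usage sketch, not taken from the original source: imgaug augmenters are callable, so the returned Sequential can be applied directly to a batch of uint8 HWC images (the dummy batch below is only for illustration).

import numpy as np

augmenter = get_image_augmenter()
# Batch of 4 dummy RGB frames, shape (N, H, W, C), dtype uint8.
images = np.random.randint(0, 255, size=(4, 120, 160, 3), dtype=np.uint8)
augmented = augmenter(images=images)  # same shape and dtype, randomly augmented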
Example #2
 def load_images(self, ids: List[int]) -> Tensor:
     """
     Returns:
         (l, c, h, w) Tensor:
             Tensor of images.
     """
     images = [  # numpy arrays (H, W, C) as read from disk
         imread(self.frame_template.format(i)) for i in ids
     ]
     # Using deterministic parameters to apply exactly
     # the same augmentations to all images in the sequence.
     if self.augment:
         augmenter = Sequential([
             LinearContrast(Deterministic(Uniform(0.7, 1.3))),
             Multiply(
                 Deterministic(Uniform(0.7, 1.3)),
                 per_channel=Deterministic(DiscreteUniform(0, 1)),
             ),
             Sometimes(
                 Deterministic(DiscreteUniform(0, 1)),
                 GaussianBlur(sigma=Deterministic(Uniform(0, 0.7))),
             ),
         ], random_order=True)
         images = augmenter(images=images)
     for i in range(len(images)):
         image = images[i].astype("float32") / 255
         images[i] = tensor(image).permute(2, 0, 1).unsqueeze_(0)
     return cat(images, dim=0)
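The deterministic-parameter trick in the comment above can be checked in isolation. A small sketch, assuming imgaug's parameter API (imgaug.parameters):

from imgaug.parameters import Deterministic, Uniform

# Deterministic samples its wrapped parameter once, at construction time, and
# then returns that fixed value on every draw, which is why every frame in the
# sequence receives the same augmentation strength.
contrast_strength = Deterministic(Uniform(0.7, 1.3))
print(contrast_strength.draw_samples(3))  # three identical values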
Example #3
 def _aug_occl(self):
     from imgaug.augmenters import Sequential, Sometimes, CoarseDropout
     return Sequential([Sometimes(0.7, CoarseDropout(p=0.4, size_percent=0.01))])
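For illustration only (not part of the original class), the occlusion augmenter drops coarse rectangular regions; applying an equivalent pipeline to a single patch could look like this, with the array shape chosen arbitrarily:

import numpy as np
from imgaug.augmenters import Sequential, Sometimes, CoarseDropout

occl_aug = Sequential([Sometimes(0.7, CoarseDropout(p=0.4, size_percent=0.01))])
patch = np.random.randint(0, 256, size=(64, 64, 3), dtype=np.uint8)
occluded = occl_aug.augment_image(patch)  # coarse rectangles zeroed out in ~70% of calls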
Example #4
import numpy as np
import imgaug.augmenters as iaa
from imgaug.augmenters import (Sometimes, GaussianBlur, Add, Multiply,
                               ContrastNormalization)


def sometimes(aug):
    """Apply `aug` with probability 0.5 (thin wrapper around iaa.Sometimes)."""
    return iaa.Sometimes(0.5, aug)

NUM_IMAGES = 198
DIR_input = "./yellow_box_jpg/"
DIR_output = "./yellow_box_bboxed/"
OBJECT_CLASS = "1"  # 0: red button 1: yellow box
NUM_AUGMENTATIONS = 5
BACKGROUND_PATH = "./background/VOCdevkit/VOC2012/JPEGImages/"

# augmentation settings
seq = iaa.Sequential([
    #Sometimes(0.5, PerspectiveTransform(0.05)),
    #Sometimes(0.5, CropAndPad(percent=(-0.05, 0.1))),
    #Sometimes(0.5, Affine(scale=(1.0, 1.2))),
    #Sometimes(0.5, Affine(rotate=(-180, 180))),
    #Sometimes(0.5, CoarseDropout( p=0.1, size_percent=0.02) ), # Put this one in a separate layer to apply after background
    Sometimes(0.5, GaussianBlur(1.2 * np.random.rand())),  # sigma is drawn once, when the pipeline is built
    Sometimes(0.5, Add((-60, 60), per_channel=0.3)),
    #Sometimes(0.5, ChangeColorTemperature((1100, 10000), from_colorspace='RGB')),
    #Sometimes(0.3, Invert(0.2, per_channel=True)),
    Sometimes(0.5, Multiply((0.6, 1.4), per_channel=0.5)),
    Sometimes(0.5, Multiply((0.6, 1.4))),
    Sometimes(0.5, ContrastNormalization((0.5, 2.2), per_channel=0.3))
    ], random_order=False)

affine = iaa.Affine(
        scale=(0.5, 1.5),
        translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)},
        rotate=(-180, 180),
        #shear=(-8, 8)
    )
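A minimal usage sketch (the file name and the order of operations are assumptions, not taken from the original script): the photometric pipeline seq and the geometric affine can each be applied per image before compositing onto a VOC background.

import imageio

image = imageio.imread(DIR_input + "000001.jpg")  # hypothetical frame name
image = seq(image=image)     # photometric jitter
image = affine(image=image)  # random scale, shift and rotation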
Example #6
    https://www.tensorflow.org/api_guides/python/train: tf.train API for building optimizers and other utilities like learning rate decay policies
    https://www.tensorflow.org/api_docs/python/tf/layers: tf.layers API for building the model; other high-level APIs like SLIM can also be used, so SLIM-defined models such as the popular ResNet or MobileNet families are customizable and usable out of the box
    
    !!!! MAKE SURE YOU SPECIFY THE CLASS_WEIGHTS PARAM PROPERLY IN 'config.json' when training a YOLO DTT, i.e. the number of classes has to match the number of class weights

    If doing quantized training with toco compatibility there are a few constraints (a minimal sketch follows after this docstring), namely:

        1. The minimum TensorFlow version has to be 1.13
        2. The currently supported non-linearity elements are relu and relu6
        3. The convolution op instantiation has to have the non-linearity element set via the 'activation' argument
        4. The batch normalization op instantiation has to have the 'fused' argument set to False

'''
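# Hedged sketch, not from this repo: one way to satisfy constraints 2-4 above
# with the tf.layers API on TensorFlow 1.13. The non-linearity is passed to the
# conv op via the 'activation' argument and batch normalization is created with
# fused=False. The helper name below is illustrative only.
import tensorflow as tf


def _toco_friendly_conv(inputs, filters, training):
    x = tf.layers.conv2d(inputs, filters, kernel_size=3, padding="same",
                         activation=tf.nn.relu6)
    return tf.layers.batch_normalization(x, fused=False, training=training)
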

from imgaug.augmenters import (Sequential, Sometimes, Affine, Multiply,
                               AdditiveGaussianNoise, ContrastNormalization)

AUG_PIPE = Sequential([
    Sometimes(0.33, Affine(rotate=(-5, 5))),
    Sometimes(0.33, Affine(scale={
        "x": (0.85, 1.15),
        "y": (0.85, 1.15)
    })),
    Sometimes(0.33, Affine(shear=(-8, 8))),
    Sometimes(0.33, Multiply((0.9, 1.1))),
    Sometimes(0.33, AdditiveGaussianNoise(loc=0, scale=(0.0, 0.1 * 255))),
    Sometimes(0.33, ContrastNormalization((0.9, 1.1)))
], random_order=True)

AUG_PIPE = None  # set AUG_PIPE to None to skip augmentations entirely; otherwise leave it as an imgaug Sequential instance


def model_function(images, train_flag):