Example #1
import numpy as np
from imgaug import BoundingBox, BoundingBoxesOnImage
from imgaug.augmenters import Sequential

class Compose(object):
    """Composes several augmenters together.
    Unlike the original PyTorch implementation, it also accepts masks (and optionally classes and boxes) as extra arguments.

    Args:
        augmenters: list of imgaug.augmenters to compose.

    """
    def __init__(self, augmenters):
        assert type(augmenters) == list, 'augmenters should be of type `list`'
        self.augmenters = Sequential(augmenters)

    def __call__(self, img, masks=None, classes=None, boxes=None):
        if masks is not None:
            returns = self.augment(img, masks, classes, boxes)
            while returns[1].shape[-1] == 0:
                returns = self.augment(img, masks, classes, boxes)
            return returns
        return self.augmenters.augment_image(img)

    def augment(self, img, masks=None, classes=None, boxes=None):
        returns = []
        aug_det = self.augmenters.to_deterministic()

        # augment image
        input_size = img.shape
        img = aug_det.augment_image(img)
        returns.append(img)

        # augment masks
        new_masks = aug_det.augment_image(masks)
        null_masks = new_masks.sum(axis=0).sum(axis=0) == 0
        new_masks = new_masks[:, :, ~null_masks]
        returns.append(new_masks)

        # if removed any mask, remove corresponding class
        if classes is not None:
            classes = classes[~null_masks]
            returns.append(classes)

        if boxes is not None:
            # augment boxes
            bboxes = BoundingBoxesOnImage(
                [BoundingBox(*box) for box in boxes], shape=input_size)
            new_bboxes = aug_det.augment_bounding_boxes([bboxes])[0]
            new_bboxes = new_bboxes.remove_out_of_image().cut_out_of_image()
            boxes = [[box.x1, box.y1, box.x2, box.y2]
                     for box in new_bboxes.bounding_boxes]
            boxes = np.array(boxes)
            returns.append(boxes)
        return tuple(returns)
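A minimal usage sketch (hypothetical augmenters and array shapes; masks follow the HxWxN layout the null-mask check above assumes):

import numpy as np
from imgaug.augmenters import Affine, Fliplr

aug = Compose([Fliplr(0.5), Affine(rotate=(-10, 10))])
img = np.zeros((128, 128, 3), dtype=np.uint8)    # hypothetical image
masks = np.zeros((128, 128, 2), dtype=np.uint8)  # two instance masks, HxWxN
masks[30:60, 30:60, 0] = 255
masks[70:90, 70:90, 1] = 255
classes = np.array([1, 2])
boxes = np.array([[30, 30, 60, 60], [70, 70, 90, 90]])
img, masks, classes, boxes = aug(img, masks, classes, boxes)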
Example #2
from cv2 import INTER_AREA
from imgaug.augmenters import CenterPadToAspectRatio, Resize, Sequential

def resize(imageDimensions):
    return Sequential([
        CenterPadToAspectRatio(float(imageDimensions[0]) /
                               float(imageDimensions[1]),
                               pad_mode='edge'),
        Resize(imageDimensions, interpolation=INTER_AREA)
    ])
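A usage sketch with a hypothetical frame. Note that imgaug interprets a plain tuple passed to Resize as a random size range, so equal height and width (a square target) is the unambiguous case here:

import numpy as np

img = np.zeros((480, 640, 3), dtype=np.uint8)   # hypothetical BGR frame
square = resize((256, 256)).augment_image(img)  # edge-padded, then 256x256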
Example #3
def load_images(self, ids: List[int]) -> Tensor:
    """
    Returns:
        (l, c, h, w) Tensor:
            Tensor of images.
    """
    images: List[Tensor] = [
        imread(self.frame_template.format(i)) for i in ids
    ]
    # Using deterministic parameters to apply exactly
    # the same augmentations to all images in the sequence.
    if self.augment:
        augmenter = Sequential([
            LinearContrast(Deterministic(Uniform(0.7, 1.3))),
            Multiply(
                Deterministic(Uniform(0.7, 1.3)),
                per_channel=Deterministic(DiscreteUniform(0, 1)),
            ),
            Sometimes(
                Deterministic(DiscreteUniform(0, 1)),
                GaussianBlur(sigma=Deterministic(Uniform(0, 0.7))),
            ),
        ], random_order=True)
        images = augmenter(images=images)
    # Scale to [0, 1] and convert each HxWxC image to a 1xCxHxW tensor
    for i in range(len(images)):
        image = images[i].astype("float32") / 255
        images[i] = tensor(image).permute(2, 0, 1).unsqueeze_(0)
    return cat(images, dim=0)
Example #4
def save_aug_file(seq: iaa.Sequential, pathstr: str):
    # `directory` and `dir_res` are module-level globals (source and result roots)
    for dirr in os.listdir(directory):
        for image in os.listdir(os.path.join(directory, dirr)):
            imgname = os.path.join(directory, dirr, image)
            img = cv2.imread(imgname)
            img_aug = seq.augment_image(img)
            path = os.path.join(dir_res, pathstr, dirr)
            os.makedirs(path, exist_ok=True)
            cv2.imwrite(os.path.join(path, image), img_aug)
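A usage sketch (assumes the module-level `directory` and `dir_res` globals point at existing source and result roots):

import imgaug.augmenters as iaa

# writes a horizontally flipped copy of every image to dir_res/flipped/<subdir>/
save_aug_file(iaa.Sequential([iaa.Fliplr(1.0)]), 'flipped')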
Example #5
def _aug_occl(self):
    # only these three augmenters are actually used below
    from imgaug.augmenters import Sequential, Sometimes, CoarseDropout
    return Sequential([Sometimes(0.7, CoarseDropout(p=0.4, size_percent=0.01))])
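A usage sketch (assuming `obj` is an instance of the surrounding class and `img` is an HxWxC uint8 array):

aug = obj._aug_occl()
# roughly 70% of calls apply coarse dropout (40% of coarse grid cells zeroed),
# the remaining calls pass the image through unchanged
occluded = aug.augment_image(img)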
Example #6
def resize_image_and_bounding_boxes(combined_image, bounding_boxes):
    combined_image = np_array(combined_image)
    bounding_boxes = BoundingBoxesOnImage(
        [BoundingBox(*coordinates, label=identifier)
         for identifier, coordinates in bounding_boxes.items()],
        shape=combined_image.shape)

    combined_image, bounding_boxes = Sequential([
        iaaResize((416, 416))
    ])(image=combined_image, bounding_boxes=bounding_boxes)

    combined_image = Image.fromarray(combined_image)
    bounding_boxes = {
        b.label: (b.x1_int, b.y1_int, b.x2_int, b.y2_int)
        for b in bounding_boxes
    }

    return combined_image, bounding_boxes
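A usage sketch (hypothetical inputs; each box is keyed by an identifier and given as (x1, y1, x2, y2) pixel coordinates, mirroring the dict comprehension above):

from PIL import Image

image = Image.new('RGB', (640, 480))          # hypothetical blank frame
boxes = {'person-0': (10, 20, 110, 220)}
image_416, boxes_416 = resize_image_and_bounding_boxes(image, boxes)
print(boxes_416)  # {'person-0': (x1, y1, x2, y2)} scaled to the 416x416 frame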
Example #7
def __refresh_sample_layout(self):
    if len(self.tmp_samples) > 0:
        self.aug_sample_imgs = list()
        clear_layout(self.__sample_layout)
        item = self.__action_tree.currentItem()
        if item is None:
            aug_imgs = self.tmp_samples
        else:
            try:
                seq = Sequential(children=[item.to_aug()])
                aug_imgs = seq(images=self.tmp_samples)
            except Exception as e:
                print(e)
                aug_imgs = self.tmp_samples
        for img_idx, img in enumerate(aug_imgs):
            # toqpixmap() already returns a QPixmap, so no extra wrapping is needed
            img = Image.fromarray(img).toqpixmap()
            self.aug_sample_imgs.append(img)
            sample_img = SampleImage(img, side_thresh=230)
            sample_img.clicked.connect(self.__sample_clicked(img_idx))
            self.__sample_layout.addWidget(sample_img)
Example #8
listImages = []
# Grab one frame from each camera
for i in range(1):
    capture = VideoCapture(i)
    listImages.append(capture.read()[1])
    capture.release()
for image in listImages:
    # Check whether the image contains smoke.
    # Preprocessing: pad to a square aspect ratio, resize to 512x512
    # and convert BGR -> RGB (cvtColor code 4).
    preprocessed = Sequential([
        CenterPadToAspectRatio(1., pad_mode='constant', pad_cval=0),
        Resize((512, 512), interpolation=3)
    ]).augment_image(cvtColor(image, 4))
    batch = array([preprocessed / 255.]).astype('float')
    # Predict the smoke mask, rescale it to [0, 255] and binarize at 127
    # (threshold type 0 is THRESH_BINARY)
    mask = (model.predict(batch, batch_size=1)[0][..., 0] * 255.).astype('uint8')
    binary = threshold(mask, 127, 255, 0)[1].astype('uint8')
    # Flag smoke when the thresholded mask sums to more than 4096
    if sum(sum(binary)) > 4096:
        # Send signal
        if argv[1] == 'true':
            # Save the image
            imwrite(
                '/root/Smoke/Images/smoke/' +
                datetime.now().strftime('%Y%m%d%H%M%S') + '.png', image)
    else:
        if argv[1] == 'true':
            # Save the image
Example #9
pipe = Sequential(
    [

        #Fliplr(0.5),
        #Flipud(0.5),
        #EdgeDetect(1.0),

        #Sharpen( alpha = (0.5, 1.0)),
        #AdditiveGaussianNoise( loc=0, scale = (0.0, 0.001 * 255) ),
        ContrastNormalization((0.9, 1.1)),
        Multiply((0.9, 1.1)),

        #AverageBlur((1, 5)),

        #Alpha(

        #   0.65,
        #   first = Affine( translate_px = { "y": (-6, 6) } ),
        #   per_channel = False
        #)

        #ChangeColorspace(from_colorspace="BGR", to_colorspace="HSV"),
        #WithChannels(0, Add( (-5, 5) ) ),
        #WithChannels(1, Add( (-5, 5) ) ),
        #WithChannels(2, Add( (-45, 45) ) ),
        #ChangeColorspace(from_colorspace="HSV", to_colorspace="BGR"),
        Affine(shear=(-2, 2)),
        Affine(rotate=(-2, 2)),

        #AddToHueAndSaturation((-100, 100), per_channel=0.5, from_colorspace = 'BGR'),
        #Invert(1.0, per_channel=1.0)
        #Sometimes(0.5, Affine(rotate = 180))
    ],
    random_order=True)
Example #10
        return tuple([
            shape(inputs)[i] if sh is None else sh
            for i, sh in enumerate(self.noise_shape)
        ])


# Load the model
model = load_model('/root/Smoke/Models/model.h5',
                   compile=False,
                   custom_objects={
                       'swish': nn.swish,
                       'FixedDropout': FixedDropout
                   })
# Build the resized smoke image
imageSmokeAug = Sequential([
    CenterPadToAspectRatio(1., pad_mode='constant', pad_cval=0),
    Resize((512, 512), interpolation=3)
]).augment_image(cvtColor(imread('/root/Smoke/Test/smoke.png'), 4))
imageSmoke = array([imageSmokeAug / 255.]).astype('float')
# Measure how long predicting the smoke image takes (it will always take longer on the first prediction)
before = datetime.now()
# Predict on the smoke image
predictionSmoke = model.predict(imageSmoke, batch_size=1)
after = datetime.now()
print('Prediction time smoke image: ' + str((after - before).total_seconds()))
# Build the resized smoke-free image
imageNeutralAug = Sequential([
    CenterPadToAspectRatio(1., pad_mode='constant', pad_cval=0),
    Resize((512, 512), interpolation=3)
]).augment_image(cvtColor(imread('/root/Smoke/Test/neutral.png'), 4))
imageNeutral = array([imageNeutralAug / 255.]).astype('float')
# Measure how long predicting the smoke-free image takes
Example #11
        # Validation dataset

        # 2017 valset
        dataset_val = CocoDataset()
        dataset_val.load_coco(args.dataset, "val2007")
        dataset_val.prepare()

        # Image Augmentation
        # Right/left flip 50% of the time; IoU50 = 0.029 (validation set)
        # augmentation = imgaug.augmenters.Fliplr(0.5)

        # Flip and rotate
        augmentation = Sequential([
            Fliplr(0.5),
            GaussianBlur((0, 1.5)),
            CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05)),
            Affine(scale=(0.3, 2), rotate=(-20, 20)),
            Multiply((0.5, 1.5)),
            AddToHueAndSaturation((-20, 20))
        ])
        #augmentation = Sequential([Flipud(0.2), Fliplr(0.5), Affine(scale=(0.2, 2), rotate=(-60,60)), GaussianBlur((0, 2)), AddToHueAndSaturation((-30,30)), GammaContrast((0.5, 1.5)), AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5)])
        #augmentation = Sequential([Fliplr(0.5), Affine(scale=(0.3, 3), rotate=(-60,60)), GaussianBlur((0, 3)), AddToHueAndSaturation((-30,30)), GammaContrast((0.5, 1.5)), AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5) ])
        '''
            iaa.Sequential([
            # apply the following augmenters to most images
            iaa.Fliplr(0.5),  # horizontally flip 50% of all images
            iaa.Flipud(0.1),  # vertically flip 10% of all images
            # crop images by -5% to 10% of their height/width

            sometimes(iaa.Affine(
                scale={"x": (0.8, 1.2), "y": (0.8, 1.2)},
                # scale images to 80-120% of their size, individually per axis
Example #12
def _flip_lr():

    pipe = Sequential([Fliplr(1.0)])

    return pipe
Example #13
def _negative():

    pipe = Sequential([Invert(1.0, per_channel=1.0)])

    return pipe
Example #14
def _rotate(n):

    pipe = Sequential([Affine(rotate=n)])

    return pipe
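These small factory functions all follow the same pattern: build a single-augmenter Sequential and return it. A usage sketch with a hypothetical `img` array:

rotated = _rotate(90).augment_image(img)    # rotate by exactly 90 degrees
mirrored = _flip_lr().augment_image(img)    # always flip left/right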
Example #15
    If doing quantized training with toco compatibility, there are a few constraints, namely (a minimal sketch follows this docstring):

        1. The minimum Tensorflow version is 1.13
        2. The currently supported non-linearity elements are relu and relu6
        3. The convolution op instantiation has to have the non-linearity set via the 'activation' argument
        4. The batch normalization op instantiation has to have the 'fused' argument set to False

'''
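# A minimal sketch of a conv + batch-norm pair satisfying constraints 2-4
# above (assumed tensor/op names; written against the TF 1.x tf.layers API):
#
#   net = tf.layers.conv2d(images, filters=32, kernel_size=3, padding='same',
#                          activation=tf.nn.relu6, name='conv1')
#   net = tf.layers.batch_normalization(net, fused=False, training=train_flag,
#                                       name='bn1')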

AUG_PIPE = Sequential([
    Sometimes(0.33, Affine(rotate=(-5, 5))),
    Sometimes(0.33, Affine(scale={"x": (0.85, 1.15), "y": (0.85, 1.15)})),
    Sometimes(0.33, Affine(shear=(-8, 8))),
    Sometimes(0.33, Multiply((0.9, 1.1))),
    Sometimes(0.33, AdditiveGaussianNoise(loc=0, scale=(0.0, 0.1 * 255))),
    Sometimes(0.33, ContrastNormalization((0.9, 1.1)))
], random_order=True)

AUG_PIPE = None  # this is how you skip doing any augmentations; otherwise have AUG_PIPE contain an imgaug Sequential instance
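# A sketch of the assumed consumption pattern (the training loop itself is not
# part of this snippet):
#
#   if AUG_PIPE is not None:
#       images = AUG_PIPE(images=images)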


def model_function(images, train_flag):
    '''
        The 'name' argument for each of the op-building functions can be
        whatever you want, except 'input', 'prediction', 'loss' and 'accuracy',
        which are reserved for convenience. The names are the ones that show up
        in tensorboard when viewing the op graph structure and can also be used
        to reference the input or output tensors of these ops.
Example #16
def __init__(self, augmenters):
    assert type(augmenters) == list, 'augmenters should be of type `list`'
    self.augmenters = Sequential(augmenters)
Example #17
def _flip_ud():

    pipe = Sequential([Flipud(1.0)])

    return pipe
Example #18
'''

    Use PCA to end up with a 2d plot which illustrates the clusters for each
    class of object that has to be detected. PCA will be applied on the
    contents of the box for each object.

'''

counter = 0


aug_pipe = Sequential([
    #ChangeColorspace(from_colorspace="BGR", to_colorspace="HSV"),
    #WithChannels(0, Add((-360, 360))),
    #ChangeColorspace(from_colorspace="HSV", to_colorspace="BGR"),
    #ContrastNormalization((0.9, 1.1))
    AdditiveGaussianNoise(loc=0, scale=(0.0, 0.001 * 255))
])

#aug_pipe = ContrastNormalization((0.9, 1.1))

#aug_pipe = Sequential([ AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255)), Affine(shear = (-5, 5))])

aug_pipe = AdditiveGaussianNoise(loc=0, scale=(0.0, 0.001*255))

def interval_overlap(interval_a, interval_b):
    
    x1, x2 = interval_a
    x3, x4 = interval_b