Example #1
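    # Assumes module-level imports: h5py, numpy as np, imgaug as ia, and
    # "from imgaug import augmenters as iaa".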
    def augment(self, n_augm, out_fname, update=False):
        # Define augmenter
        sometimes = lambda aug: iaa.Sometimes(0.5, aug)
        seq = iaa.Sequential(
            [
                # apply the following augmenters to most images
                iaa.Fliplr(0.5), # horizontally flip 50% of all images
                #iaa.Flipud(0.2), # vertically flip 20% of all images
                sometimes(iaa.Crop(percent=(0, 0.1))), # crop images by 0-10% of their height/width
                sometimes(iaa.Affine(
                    scale={"x": (0.9, 1.1), "y": (0.9, 1.1)}, # scale images to 80-120% of their size, individually per axis
                    translate_percent={"x": (-0.1, 0.1), "y": (-0.1, 0.1)}, # translate by -20 to +20 percent (per axis)
                    rotate=(-20, 20), # rotate by -45 to +45 degrees
                    shear=(-16, 16), # shear by -16 to +16 degrees
                    order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
                    cval=(0, 255), # if mode is constant, use a cval between 0 and 255
                    mode=ia.ALL # use any of scikit-image's warping modes
                )),

                sometimes(
                    iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5) # add gaussian noise to images
                ),
            ],
            random_order=True
        )
                
        with h5py.File(self.path+out_fname, 'r+') as hf:
            for subject_k,subject_v in hf['train/pose6/'].items():
                if subject_k.split('_')[1] in ['F017', 'F018', 'F019','F020']:
                    for segment_k,segment_v in subject_v.items():
                        labels = np.asarray(segment_v['aus'])
                        idxs = self.balance(labels)
                        labels = labels[idxs]

                        for i in range(n_augm):
                            node  = 'train/pose6/'+subject_k+'/'+segment_k+'_'+str(i)
                            _ = hf.create_group(node)

                        for tp in ['faces', 'leye', 'reye', 'beye', 'nose', 'mouth', 'lmouth', 'rmouth']:
                            print('{}/{}/{}'.format(subject_k, segment_k, tp))
                            images = np.asarray(segment_v[tp])
                            if np.max(idxs)<images.shape[0]:
                                images = images[idxs]
                            else:
                                print ('-------An index error-------')
                                labels = np.asarray(segment_v['aus'])
                                
                            for i in range(n_augm):
                                augm_images = seq.augment_images(images)
                                node  = 'train/pose6/'+subject_k+'/'+segment_k+'_'+str(i)
                                print('     Writing node {} with data of shape {}'.format(node, augm_images.shape))
                                segment = hf[node]
                                
                                segment.create_dataset(tp, data=augm_images)
                                if tp=='faces':
                                    segment.create_dataset('aus', data=labels)
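
# Standalone sketch (not part of the class above): a comparable imgaug pipeline
# applied to a dummy uint8 batch, to show what seq.augment_images() expects and
# returns. Assumes only numpy and imgaug; every name here is illustrative.
import numpy as np
from imgaug import augmenters as iaa

demo_seq = iaa.Sequential([
    iaa.Fliplr(0.5),                                   # horizontally flip 50% of the images
    iaa.Sometimes(0.5, iaa.Crop(percent=(0, 0.1))),    # sometimes crop by 0-10% (output size is kept)
    iaa.Sometimes(0.5, iaa.Affine(rotate=(-20, 20))),  # sometimes rotate by -20 to +20 degrees
], random_order=True)

demo_images = np.random.randint(0, 256, size=(4, 64, 64, 3), dtype=np.uint8)
demo_augmented = demo_seq.augment_images(demo_images)  # same (N, H, W, C) shape as the input
print(demo_augmented.shape)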
Example #2
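# These snippets assume module-level imports of copy, cv2, numpy as np,
# imgaug as ia, "from imgaug import augmenters as iaa", an imshow-capable
# misc module (the old scipy.misc), plus the project's augment_parameters
# module and the SHOW_AUGMENTED_IMAGE_SAMPLES / SHOW_KEYPOINTS flags used below.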
 def color_space_argumentation(self, image):
     seq = iaa.Sequential([
         iaa.OneOf([
             iaa.GaussianBlur((0, 3.0)),
             iaa.AverageBlur(k=(2, 7)),
             iaa.MedianBlur(k=(3, 7)),
         ]),
         iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)),
         iaa.Add((-2, 21), per_channel=0.5),
         iaa.Multiply((0.75, 1.25), per_channel=0.5),
         iaa.ContrastNormalization((0.5, 1.50), per_channel=0.5),
         iaa.Grayscale(alpha=(0.0, 0.50)),
     ], random_order=False)
     return seq.augment_image(image)
def get_basic_imgaug_seq(seed):
    determinist = {
        "deterministic": True,
        "random_state": seed
    }
    train_seq = iaa.Sequential([
        iaa.Affine(translate_px=(-25, 25),
                   scale=(0.85, 1.15),
                   rotate=(-65, 65),
                   mode='reflect',
                   **determinist),
    ],
        random_order=True,
        **determinist
    )
    return train_seq
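
# Reproducibility sketch (not from the original project): two sequences built
# by get_basic_imgaug_seq() with the same seed should transform a given image
# identically, which is what the deterministic/random_state arguments are for.
# Dummy data; assumes numpy and imgaug are installed.
import numpy as np

image = np.random.randint(0, 256, size=(96, 96, 3), dtype=np.uint8)
out_a = get_basic_imgaug_seq(seed=42).augment_image(image)
out_b = get_basic_imgaug_seq(seed=42).augment_image(image)
print(np.array_equal(out_a, out_b))  # expected: True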
def get_high_zoom_imgaug_seq(seed):
    determinist = {
        "deterministic": True,
        "random_state": seed
    }
    train_seq = iaa.Sequential([
        # iaa.Sometimes(0.45, iaa.Sharpen(alpha=0.9, lightness=(0.5, 1.15), **determinist), **determinist),
        iaa.Sometimes(0.45, iaa.ContrastNormalization(alpha=(0.75, 1.15), **determinist), **determinist),
        # iaa.Sometimes(0.5, iaa.AdditiveGaussianNoise(scale=(0, 0.01 * 255),
        #                                              per_channel=True, **determinist), **determinist),
        iaa.Affine(translate_px=(-5, 5),
                   scale=(1.0, 1.75),
                   rotate=(-65, 65),
                   mode='reflect',
                   **determinist),
        # iaa.Add(value=(-35, 35), per_channel=True),  # Probably, can change nature of label
    ],
        random_order=True,
        **determinist
    )
    return train_seq
def augment(img_data, augment=True):
    assert 'filepath' in img_data
    assert 'bboxes' in img_data
    assert 'width' in img_data
    assert 'height' in img_data

    img_data_aug = copy.deepcopy(img_data)

    img = cv2.imread(img_data_aug['filepath'])
    # add dimension to keep imgaug tutorial code unchanged
    images = np.expand_dims(img, axis=0)

    if augment:
        # The augmenters expect a list of imgaug.KeypointsOnImage.
        keypoints_on_images = []
        for image in images:
            keypoints = []
            for bbox in img_data_aug['bboxes']:
                # for every bbox x1, y1, x2, y2 in pairs
                x1 = bbox['x1']
                y1 = bbox['y1']
                keypoints.append(ia.Keypoint(x=x1, y=y1))

                x2 = bbox['x2']
                y2 = bbox['y2']
                keypoints.append(ia.Keypoint(x=x2, y=y2))
            keypoints_on_images.append(
                ia.KeypointsOnImage(keypoints, shape=image.shape))

        # Define our sequence of augmentation steps that will be applied to every image
        # All augmenters with per_channel=0.5 will sample one value _per image_
        # in 50% of all cases. In all other cases they will sample new values
        # _per channel_.
        # CHANGE PARAMETERS IN DATA_AUGMENT_V2_PARAMETERS FILE
        seq = iaa.Sequential(augment_parameters.get_augment_parameters(),
                             random_order=True)

        # maybe show image transformation sample in grid
        if SHOW_AUGMENTED_IMAGE_SAMPLES:
            seq.show_grid(images, cols=8, rows=8)
        seq_det = seq.to_deterministic()  # call this for each batch again, NOT only once at the start

        # augment keypoints and images
        images_aug = seq_det.augment_images(images)
        keypoints_aug = seq_det.augment_keypoints(keypoints_on_images)

        # maybe show keypoints
        if SHOW_KEYPOINTS:
            for img_idx, (image_before, image_after, keypoints_before,
                          keypoints_after) in enumerate(
                              zip(images, images_aug, keypoints_on_images,
                                  keypoints_aug)):
                image_before = keypoints_before.draw_on_image(image_before)
                image_after = keypoints_after.draw_on_image(image_after)
                misc.imshow(np.concatenate((image_before, image_after),
                                           axis=1))  # before and after
                for kp_idx, keypoint in enumerate(keypoints_after.keypoints):
                    keypoint_old = keypoints_on_images[img_idx].keypoints[
                        kp_idx]
                    x_old, y_old = keypoint_old.x, keypoint_old.y
                    x_new, y_new = keypoint.x, keypoint.y
                    print(
                        "[Keypoints for image #%d] before aug: x=%d y=%d | after aug: x=%d y=%d"
                        % (img_idx, x_old, y_old, x_new, y_new))

        # update bboxes coordinates after augmentation
        keypoints = keypoints_aug[0].get_coords_array()
        for i, bbox in enumerate(img_data_aug['bboxes']):
            bbox['x1'] = keypoints[2 * i][0]
            bbox['y1'] = keypoints[2 * i][1]
            bbox['x2'] = keypoints[2 * i + 1][0]
            bbox['y2'] = keypoints[2 * i + 1][1]

        img = images_aug[0]

    img_data_aug['width'] = img.shape[1]
    img_data_aug['height'] = img.shape[0]
    return img_data_aug, img
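
# Standalone sketch (not from the original project) of the corner-keypoint trick
# used in augment() above, on dummy data: the two bbox corners are augmented as
# keypoints and then read back as the new box coordinates. Assumes numpy/imgaug.
import numpy as np
import imgaug as ia
from imgaug import augmenters as iaa

image = np.zeros((100, 200, 3), dtype=np.uint8)    # dummy 100x200 image
bbox = {'x1': 20, 'y1': 30, 'x2': 80, 'y2': 90}    # hypothetical box

kps = ia.KeypointsOnImage([ia.Keypoint(x=bbox['x1'], y=bbox['y1']),
                           ia.Keypoint(x=bbox['x2'], y=bbox['y2'])],
                          shape=image.shape)

seq_det = iaa.Sequential([iaa.Fliplr(1.0)]).to_deterministic()  # always flip, for a visible effect
image_aug = seq_det.augment_image(image)
kps_aug = seq_det.augment_keypoints([kps])[0]

# Read the moved corners back into the box; note that a horizontal flip mirrors
# x, so x1 can end up to the right of x2 and may need re-ordering downstream.
bbox['x1'], bbox['y1'] = kps_aug.keypoints[0].x, kps_aug.keypoints[0].y
bbox['x2'], bbox['y2'] = kps_aug.keypoints[1].x, kps_aug.keypoints[1].y
print(bbox)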
def get_id_imgaug_seq():
    return iaa.Sequential()
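
# Sanity-check sketch (not from the original project): the empty Sequential
# returned by get_id_imgaug_seq() is an identity pipeline, so augmenting leaves
# the image unchanged. Dummy data; assumes numpy and imgaug are installed.
import numpy as np

sample = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
print(np.array_equal(get_id_imgaug_seq().augment_image(sample), sample))  # expected: True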