Example #1
    def test_augment_images__hue(self):
        def augment_images(images, random_state, parents, hooks):
            assert images[0].dtype.name == "int16"
            images = np.copy(images)
            images[..., 0] += 10
            return images

        aug = iaa.WithHueAndSaturation(iaa.Lambda(func_images=augment_images))

        # example image
        image = np.arange(0, 255).reshape((1, 255, 1)).astype(np.uint8)
        image = np.tile(image, (1, 1, 3))
        image[..., 0] += 0
        image[..., 1] += 1
        image[..., 2] += 2

        # compute expected output
        image_hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
        image_hsv = image_hsv.astype(np.int16)
        image_hsv[..., 0] = ((image_hsv[..., 0].astype(np.float32) / 180) *
                             255).astype(np.int16)
        image_hsv[..., 0] += 10
        image_hsv[..., 0] = np.mod(image_hsv[..., 0], 255)
        image_hsv[..., 0] = ((image_hsv[..., 0].astype(np.float32) / 255) *
                             180).astype(np.int16)
        image_hsv = image_hsv.astype(np.uint8)
        image_expected = cv2.cvtColor(image_hsv, cv2.COLOR_HSV2RGB)
        assert not np.array_equal(image_expected, image)

        # augment and verify
        images_aug = aug.augment_images(np.stack([image, image], axis=0))
        assert ia.is_np_array(images_aug)
        for image_aug in images_aug:
            assert image_aug.shape == (1, 255, 3)
            assert np.array_equal(image_aug, image_expected)
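A compact standalone version of the pattern exercised by this test (wrapping a Lambda in WithHueAndSaturation so that only the hue channel is shifted); the shift value and input image are illustrative:

import numpy as np
import imgaug as ia
import imgaug.augmenters as iaa

def shift_hue(images, random_state, parents, hooks):
    # Inside WithHueAndSaturation the child augmenter receives int16
    # hue/saturation data, so adding to channel 0 shifts the hue.
    images = np.copy(images)
    images[..., 0] += 10
    return images

aug = iaa.WithHueAndSaturation(iaa.Lambda(func_images=shift_hue))
image_aug = aug.augment_image(ia.quokka(size=(64, 64)))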
Example #2
def augmenter(batch_size=4,
              crop_size=256,
              num_channels=3,
              crop_before_augs=(),
              crop_after_augs=()):
    n_img = batch_size
    size = crop_size
    n_ch = num_channels

    def func_images(images, random_state, parents, hooks):
        ret_imgs = np.empty((n_img, size, size, n_ch))
        for idx, img in enumerate(images):
            h, w, c = img.shape
            y = random_state.randint(0, h - size)
            x = random_state.randint(0, w - size)
            ret_imgs[idx] = img[y:y + size, x:x + size].reshape(
                (size, size, n_ch))
        return ret_imgs

    def func_heatmaps(heatmaps, random_state, parents, hooks):
        return heatmaps

    def func_keypoints(keypoints_on_images, random_state, parents, hooks):
        return keypoints_on_images

    aug_list = (
       list(crop_before_augs) +
       [iaa.Lambda(
         func_images=func_images,
         func_heatmaps=func_heatmaps,
         func_keypoints=func_keypoints)] +
       list(crop_after_augs)
    )
    print(aug_list)
    return iaa.Sequential(aug_list)
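A short usage sketch of the factory above; the input images are arbitrary but must be larger than crop_size:

import numpy as np

seq = augmenter(batch_size=4, crop_size=256)
batch = [np.zeros((300, 400, 3), dtype=np.uint8) for _ in range(4)]
crops = seq.augment_images(batch)  # array of shape (4, 256, 256, 3)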
Example #3
def aug_val_img(img):
    '''Apply augmentation to a uint8 image or a batch of images (NHWC).'''
    aug_seq = iaa.Lambda(func_images=func_images)
    
    if (img.ndim == 3):
        # augment a single image
        img_aug = aug_seq.augment_image(img)
    elif (img.ndim == 4):
        # augment a batch of images
        img_aug = aug_seq.augment_images(img)
    else:
        img_aug = []
    return img_aug
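The snippet relies on a func_images callback defined elsewhere in the original code. A minimal hypothetical stand-in with the required signature, shown only to make the example self-contained:

def func_images(images, random_state, parents, hooks):
    # Hypothetical example callback: invert every uint8 image in the batch.
    return [255 - img for img in images]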
Example #4
def chapter_augmenters_lambda():
    def img_func(images, random_state, parents, hooks):
        for img in images:
            img[::4] = 0
        return images

    def keypoint_func(keypoints_on_images, random_state, parents, hooks):
        return keypoints_on_images

    aug = iaa.Lambda(img_func, keypoint_func)
    run_and_save_augseq("meta/lambda.jpg",
                        aug, [ia.quokka(size=(128, 128)) for _ in range(8)],
                        cols=4,
                        rows=2)
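run_and_save_augseq is a helper from the imgaug documentation build and ia.quokka loads imgaug's example image; a self-contained variant of the same Lambda (blackening every fourth row) can be run directly:

import numpy as np
import imgaug.augmenters as iaa

def img_func(images, random_state, parents, hooks):
    for img in images:
        img[::4] = 0  # set every fourth row to black, in place
    return images

aug = iaa.Lambda(img_func)
image = np.full((32, 32, 3), 255, dtype=np.uint8)
image_aug = aug.augment_image(image)  # rows 0, 4, 8, ... are now black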
Example #5
def data_augment_test():
    image_path = os.path.join(voc2012_root_path, "JPEGImages/2008_000008.jpg")
    src_image = cv2.imread(image_path)

    rotate, func_keypoints = get_rotate_keypoints(50)
    local_seq = iaa.Sequential([
        # iaa.Affine(rotate=random.randint(0, 360), cval=imgaug.ALL)
        iaa.Lambda(func_images=rotate, func_keypoints=func_keypoints)
    ])
    for i in range(100):
        dst_image = local_seq.augment_image(src_image)
        print(dst_image.shape)
        cv2.imshow("src_image", src_image)
        cv2.imshow("dst_image", dst_image)
        cv2.waitKey(1000)
    exit()
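get_rotate_keypoints is not shown in this snippet; from the way it is unpacked, it presumably returns an image callback and a keypoint callback closed over the rotation angle. A rough, hypothetical reconstruction:

import cv2

def get_rotate_keypoints(angle):
    # Hypothetical sketch: rotate every image by `angle` degrees around its
    # center and leave keypoints untouched.
    def rotate(images, random_state, parents, hooks):
        out = []
        for img in images:
            h, w = img.shape[:2]
            m = cv2.getRotationMatrix2D((w / 2.0, h / 2.0), angle, 1.0)
            out.append(cv2.warpAffine(img, m, (w, h)))
        return out

    def func_keypoints(keypoints_on_images, random_state, parents, hooks):
        return keypoints_on_images

    return rotate, func_keypoints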
Example #6
def get_augmentation_sequence():
    sometimes = lambda aug: iaa.Sometimes(0.5, aug)

    slice_thickness_augmenter = iaa.Lambda(
        func_images=slice_thickness_func_images,
        func_keypoints=slice_thickness_func_keypoints)

    seq = iaa.Sequential([
        sometimes(iaa.Fliplr(0.5)),
        iaa.Sometimes(0.1, iaa.Add((-70, 70))),
        sometimes(
            iaa.Affine(scale={
                "x": (0.8, 1.2),
                "y": (0.8, 1.2)
            }  # scale images to 80-120% of their size, individually per axis
                       )),
        # sometimes(iaa.Multiply((0.5, 1.5))),
        # sometimes(iaa.ContrastNormalization((0.5, 2.0))),
        #     sometimes(iaa.Affine(
        #     translate_percent={"x": (-0.02, 0.02), "y": (-0.02, 0.02)}, # translate by -20 to +20 percent (per axis)
        #     rotate=(-2, 2), # rotate by -45 to +45 degrees
        #     shear=(-2, 2), # shear by -16 to +16 degrees
        #     order=[0, 1], # use nearest neighbour or bilinear interpolation (fast)
        #     cval=(0, 255), # if mode is constant, use a cval between 0 and 255
        #     mode='constant' # use any of scikit-image's warping modes (see 2nd image from the top for examples)
        # )),
        #     sometimes(iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05))),
        sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.01))),
        iaa.Sometimes(
            0.1,
            iaa.SimplexNoiseAlpha(iaa.OneOf(
                [iaa.Add((150, 255)),
                 iaa.Add((-100, 100))]),
                                  sigmoid_thresh=5)),
        iaa.Sometimes(
            0.1,
            iaa.OneOf([
                iaa.CoarseDropout((0.01, 0.15), size_percent=(0.02, 0.08)),
                iaa.CoarseSaltAndPepper(p=0.2, size_percent=0.01),
                iaa.CoarseSalt(p=0.2, size_percent=0.02)
            ])),
        iaa.Sometimes(0.25, slice_thickness_augmenter)
    ])
    return seq
Example #7
def build_augmenters(args, augmenters=None):
    ''' Construct a list of augmenters which rely on the arguments handled by the parent parser'''

    if augmenters is None:
        augmenters = []

    opacity_range = args.opacity
    if opacity_range != (-1.0, -1.0):
        # set the alpha channel to a random value
        if opacity_range == (1.0, 1.0):
            opacity_range = 1.0
        augmenters.insert(
            0, iaa.Lambda(func_images=set_alpha_wrapper(opacity_range)))

    scale = args.scale
    if scale != (1.0, 1.0):
        augmenters.append(
            iaa.Affine(scale=scale)
        )  # scale the image to between 20% and 125% of its original size

    rotate = args.rotate
    if rotate != (0.0, 0.0):
        augmenters.append(
            iaa.Rotate(rotate)
        )  # rotate the image somewhere in the range of -10.0 - 10.0 deg

    shear = args.shear
    if shear != (0.0, 0.0):
        augmenters.append(
            iaa.ShearX(shear)
        )  # shear the image somewhere in the range of -10.0 - 10.0 deg

    noise = args.noise
    if noise != (0, 0):
        augmenters.append(iaa.AdditiveGaussianNoise(scale=noise))

    grayscale = args.grayscale
    if grayscale != (0.0, 1.0):
        augmenters.append(
            iaa.WithChannels([0, 1, 2], iaa.Grayscale(alpha=(0.0, 1.0))))

    return augmenters
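set_alpha_wrapper comes from elsewhere in that code base; judging by its use, it should return a func_images callback that sets the alpha channel of RGBA images. A hypothetical sketch:

import numpy as np

def set_alpha_wrapper(opacity_range):
    # Hypothetical sketch: sample an opacity from opacity_range (or use it
    # directly if it is a scalar) and write it into the alpha channel.
    def func_images(images, random_state, parents, hooks):
        out = []
        for img in images:
            img = np.copy(img)
            if img.ndim == 3 and img.shape[-1] == 4:
                if isinstance(opacity_range, tuple):
                    alpha = random_state.uniform(opacity_range[0], opacity_range[1])
                else:
                    alpha = opacity_range
                img[..., 3] = np.uint8(255 * alpha)
            out.append(img)
        return out
    return func_images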
Example #8
def generate_iaa_sequence(policy: List):
    """ Randomly generate image augment sequence """
    head_aug = [
        iaa.Sometimes(
            .5,
            iaa.OneOf([
                iaa.Add((-10, 10), per_channel=0.5),
                iaa.Multiply((0.9, 1.1), per_channel=0.5),
                iaa.ContrastNormalization((0.9, 1.1), per_channel=0.5)
            ]))
    ]

    body_aug = policy[np.random.randint(0, len(policy))]
    tail_aug = [
        iaa.Lambda(func_images=cutout_wrap, func_keypoints=None, name="cutout")
    ]

    seq = iaa.Sequential(head_aug + body_aug + tail_aug)

    return seq
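cutout_wrap is imported from elsewhere; a minimal hypothetical cutout-style callback with the signature expected by iaa.Lambda:

import numpy as np

def cutout_wrap(images, random_state, parents, hooks):
    # Hypothetical sketch: zero out one random square patch per image.
    out = []
    for img in images:
        img = np.copy(img)
        h, w = img.shape[:2]
        size = max(1, min(h, w) // 4)
        y = random_state.randint(0, max(1, h - size))
        x = random_state.randint(0, max(1, w - size))
        img[y:y + size, x:x + size] = 0
        out.append(img)
    return out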
Example #9
    def trapezoidal_mask(cls, lower_left, lower_right, upper_left, upper_right,
                         min_y, max_y):
        """
        Uses a binary mask to generate a trapezoidal region of interest.
        Especially useful in filtering out uninteresting features from an
        input image.
        """
        def _transform_images(images, random_state, parents, hooks):
            # Transform a batch of images
            transformed = []
            mask = None
            for image in images:
                if mask is None:
                    mask = np.zeros(image.shape, dtype=np.int32)
                    # # # # # # # # # # # # #
                    #       ul     ur          min_y
                    #
                    #
                    #
                    #    ll             lr     max_y
                    points = [[upper_left, min_y], [upper_right, min_y],
                              [lower_right, max_y], [lower_left, max_y]]
                    cv2.fillConvexPoly(mask, np.array(points, dtype=np.int32),
                                       [255, 255, 255])
                    mask = np.asarray(mask, dtype='bool')

                masked = np.multiply(image, mask)
                transformed.append(masked)

            return transformed

        def _transform_keypoints(keypoints_on_images, random_state, parents,
                                 hooks):
            # No-op
            return keypoints_on_images

        augmentation = iaa.Lambda(func_images=_transform_images,
                                  func_keypoints=_transform_keypoints)
        return augmentation
Example #10
 def __init__(self,
              images,
              labels,
              batch_size=16,
              image_shape=(256, 512, 1),
              do_shuffle_at_epoch_end=True,
              length=None,
              do_augment=True):
     self.number_batches_augmentation = 4
     self.labels = labels  # array of labels
     self.images = images  # array of image paths
     self.input_shape = image_shape  # image dimensions
     self.label_shape = (image_shape[0], image_shape[1], 1)
     self.length = length
     self.batch_size = batch_size  # batch size
     self.shuffle = do_shuffle_at_epoch_end  # shuffle bool
     self.augment = do_augment  # augment data bool
     self.augmenting_pipeline = iaa.Sequential([
         iaa.PadToFixedSize(pad_mode="symmetric", width=1024, height=0),
         iaa.Affine(shear=(-20, 20)),
         iaa.CropToFixedSize(width=512, height=256, position="center"),
         iaa.Affine(scale={
             "x": (1, 1.5),
             "y": (1, 1.5)
         },
                    translate_percent={"x": (-0.4, 0.4)},
                    mode="symmetric",
                    cval=[0, 0, 0, 0]),
         iaa.PerspectiveTransform(scale=(0, 0.10)),
         iaa.CropToFixedSize(width=512, height=256, position="center-top"),
         # iaa.PiecewiseAffine(scale=(0.001, 0.01), nb_rows=4, nb_cols=8),
         iaa.Lambda(func_images=self.add_background,
                    func_segmentation_maps=self.convert_segmentations),
     ])
     self.indexes = np.arange(len(self.images))
     self.on_epoch_end()
Example #11
def make_synthetic_prev_mask_complex_mask_augmenter(crop_size):
    h, w = crop_size

    return iaa.Sequential([
        iaa.Sometimes(0.5, iaa.Lambda(func_segmentation_maps=choose_random_objects_mask_augmenter)),
        iaa.Lambda(func_segmentation_maps=morph_close_mask_augmenter),
        iaa.Sometimes(0.3,
            # failed mask
            iaa.OneOf([
                iaa.TotalDropout(1.0),  # fill image
                iaa.Sequential([  # fail mask
                    iaa.OneOf([
                        iaa.Lambda(func_segmentation_maps=make_morph_operation_mask_augmenter(cv2.erode, min_coef=0.2, max_coef=0.5)),
                        iaa.Lambda(func_segmentation_maps=make_morph_operation_mask_augmenter(cv2.dilate, min_coef=0.2, max_coef=0.5)),
                    ]),
                    iaa.Affine(translate_percent=iap.Choice([iap.Uniform(-0.5, -0.2), iap.Uniform(0.2, 0.5)]))
                ])
            ]),

            # normal mask
            iaa.Sequential([
                iaa.Sometimes(0.1, iaa.OneOf([
                    iaa.Lambda(func_segmentation_maps=make_morph_operation_mask_augmenter(cv2.erode)),  # smaller mask
                    iaa.Lambda(func_segmentation_maps=make_morph_operation_mask_augmenter(cv2.dilate)),  # larger mask
                ])),
                iaa.Sometimes(1.0, iaa.Affine(
                    scale=iap.Normal(loc=1, scale=0.02),
                    translate_percent=iap.Normal(loc=0, scale=0.03),
                    shear=iap.Normal(loc=0, scale=1),
                    backend='cv2'
                )),
                iaa.Sometimes(0.1,
                    iaa.ElasticTransformation(alpha=2000, sigma=50)
                ),
                iaa.Sometimes(0.1,
                    iaa.PiecewiseAffine()
                )
            ])
        )
    ], random_order=False)
Example #12
    # Convert float64 to uint8
    img_ret *= 255
    img_ret = np.uint8(img_ret)
    return img_ret

def func_images(images, random_state, parents, hooks):
    images_ret = images.copy()
    # WxHxCxB
    for i in range(images.shape[3]):
        val_fgd = np.random.uniform(0.5, 1.0)
        val_bgd = np.random.uniform(0.3, 0.5)
        images_ret[:, :, :, i] = fill_color(
            images_ret[:, :, :, i], val_fgd, val_bgd)
    return images_ret

shade_aug = iaa.Lambda(func_images=func_images)
shade_seq = iaa.Sequential([
    shade_aug,
])

# ========== Other Augmentation =============
# perspective transform
# rotation (affine transform)
# blur (gaussian blur)
# noise (gaussian noise)
other_seq = iaa.Sequential([
    iaa.PerspectiveTransform(scale=(0.01, 0.15)),
    iaa.Affine(rotate=(-30, 30)),
    iaa.GaussianBlur(sigma=(0.0, 1.0)),
    iaa.AdditiveGaussianNoise(scale=(0, 0.1*255)),
])
Example #13
    # Values around 1.0 lead to contrast-adjusted images. Values above 1.0 quickly lead to partially broken
    # images due to exceeding the datatype's value range.
    "Log_Contrast": lambda lo, hi, percent: iaa.LogContrast((lo, hi), per_channel=percent),

    # Augmenter that changes the contrast of images using a unique formula (sigmoid).
    # Multiplier for sigmoid function is between lo and hi, sampled randomly per image. c_lo and c_hi decide the
    # cutoff value that shifts the sigmoid function in horizontal direction (Higher values mean that the switch
    # from dark to light pixels happens later, i.e. the pixels will remain darker).
    # For percent of all images values are sampled independently per channel:
    "Sigmoid_Contrast": lambda lo, hi, c_lo, c_hi, percent:
    iaa.SigmoidContrast((lo, hi), (c_lo, c_hi), per_channel=percent),

    # Augmenter that calls a custom (lambda) function for each batch of input images.
    # Extracts Canny Edges from images (refer to description in CO)
    # Good default values for min and max are 100 and 200
    'Custom_Canny_Edges': lambda min_val, max_val: iaa.Lambda(
        func_images=CO.Edges(min_value=min_val, max_value=max_val)),

}


def get_legacy_da_scheme():
    from src.genotype.cdn.nodes.da_node import DANode

    def _get_node_type(i, max_i):
        return NodeType.INPUT if i == 0 else NodeType.OUTPUT if i == max_i - 1 else NodeType.HIDDEN

    num_das = 6
    nodes: List[DANode] = [DANode(i, _get_node_type(i, num_das)) for i in range(num_das)]
    _make_da_scheme_legacy(nodes)
    # Note: the connection IDs should not matter because DA connections will never be mutated
    return DAGenome(nodes,
Example #14
def rescale_images(images, random_state, parents, hooks):
    result = []
    for image in images:
        image_aug = np.copy(image)
        if (image.dtype == np.uint8):
            image_aug = image_aug / 255
        result.append(image_aug)
    return result


def center_rescaled_images(images, random_state, parents, hooks):

    result = []
    for image in images:
        image_aug = np.copy(image)
        if (image.dtype == np.uint8):
            image_aug = image_aug - 128
        else:
            image_aug = 2 * (image_aug - 0.5)
        result.append(image_aug)
    return result


void_fun = lambda x, random_state, parents, hooks: x

rescale_augmenter = iaa.Lambda(func_images=rescale_images,
                               func_segmentation_maps=void_fun,
                               func_heatmaps=void_fun,
                               func_keypoints=void_fun)

center_augmenter = iaa.Lambda(func_images=center_rescaled_images,
                              func_heatmaps=void_fun,
                              func_keypoints=void_fun)
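A brief usage sketch of the two Lambda augmenters defined above; the input image is arbitrary:

import numpy as np

image = np.random.randint(0, 255, (64, 64, 3), dtype=np.uint8)
image_rescaled = rescale_augmenter.augment_image(image)           # float64 values in [0, 1]
image_centered = center_augmenter.augment_image(image_rescaled)   # floats shifted to roughly [-1, 1]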
Example #15
train_set = all_annotations.loc[
    lambda df: df.day.isin(train_val_test_split["train_set_dates"])]
val_set = all_annotations.loc[
    lambda df: df.day.isin(train_val_test_split["val_set_dates"])]
test_set = all_annotations.loc[lambda df: df.day.isin(train_val_test_split[
    "test_set_dates"])].reset_index(drop=True)

#%% Init model
preprocessing = iaa.Sequential([
    iaa.Fliplr(0.5),
    iaa.Flipud(0.5),
    iaa.Affine(rotate=(-180, 180)),
    iaa.CropToFixedSize(224, 224, position="center"),
    iaa.PadToFixedSize(224, 224, position="center"),
    iaa.AssertShape((None, 224, 224, 3)),
    iaa.Lambda(
        lambda images_list, *_: keras_applications.resnet50.preprocess_input(
            np.stack(images_list), data_format="channels_last")),
])

siamese_nets = SiameseNets(
    branch_model={
        "name": "ResNet50",
        "init": {
            "include_top": False,
            "input_shape": (224, 224, 3),
            "pooling": "avg"
        }
    },
    head_model={
        "name": "MixedNorms",
        "init": {
Example #16
    def __init__(self):
        self.FACE_POINTS = list(range(17, 68))
        self.MOUTH_POINTS = list(range(48, 68))
        self.RIGHT_BROW_POINTS = list(range(17, 22))
        self.LEFT_BROW_POINTS = list(range(22, 27))
        self.RIGHT_EYE_POINTS = list(range(36, 42))
        self.LEFT_EYE_POINTS = list(range(42, 48))
        self.NOSE_POINTS = list(range(27, 35))
        self.JAW_POINTS = list(range(0, 17))

        self.FACE_POINTS = list(range(0, 27))

        self.ALIGN_POINTS = (self.LEFT_BROW_POINTS + self.RIGHT_EYE_POINTS +
                             self.LEFT_EYE_POINTS + self.RIGHT_BROW_POINTS +
                             self.NOSE_POINTS + self.MOUTH_POINTS)

        self.r1 = [
            self.LEFT_EYE_POINTS + self.RIGHT_EYE_POINTS +
            self.LEFT_BROW_POINTS + self.RIGHT_BROW_POINTS,
            self.NOSE_POINTS + self.MOUTH_POINTS
        ]

        def t1(images, random_state, parents, hooks):
            T = []
            iter = np.random.randint(1, 3)
            for i in range(iter):
                T.append(iaa.MinPooling(kernel_size=5, keep_size=True))
                T.append(iaa.GaussianBlur(sigma=3.))
            if np.random.rand() > 0.5:
                if np.random.rand() > 0.5:
                    T.append(iaa.MedianBlur(k=3))
                else:
                    T.append(iaa.AverageBlur(k=3))
                T.append(iaa.GaussianBlur(sigma=(0., 3.)))
            images = iaa.Sequential(T).augment_images(images)
            return images

        self.t1 = iaa.Lambda(func_images=t1)

        self.r2 = [self.FACE_POINTS]

        def t2(images, random_state, parents, hooks):
            iter = np.random.randint(6, 9)
            T = []
            for i in range(iter):
                T.append(iaa.MinPooling(kernel_size=5, keep_size=True))
                T.append(iaa.GaussianBlur(sigma=3.))
            images = iaa.Sequential(T).augment_images(images)
            return images

        self.t2 = iaa.Lambda(func_images=t2)

        self.r3 = [self.MOUTH_POINTS]

        def t3(images, random_state, parents, hooks):
            T = []
            iter = np.random.randint(1, 3)
            for i in range(iter):
                T.append(iaa.MinPooling(kernel_size=5, keep_size=True))
                T.append(iaa.GaussianBlur(sigma=3.))
            if np.random.rand() > 0.5:
                if np.random.rand() > 0.5:
                    T.append(iaa.MedianBlur(k=3))
                else:
                    T.append(iaa.AverageBlur(k=3))
                T.append(iaa.GaussianBlur(sigma=(0., 3.)))
            images = iaa.Sequential(T).augment_images(images)
            return images

        self.t3 = iaa.Lambda(func_images=t3)

        self.r4 = [self.LEFT_EYE_POINTS + self.RIGHT_EYE_POINTS]

        def t4(images, random_state, parents, hooks):
            T = []
            iter = np.random.randint(1, 3)
            for i in range(iter):
                T.append(iaa.MinPooling(kernel_size=5, keep_size=True))
                T.append(iaa.GaussianBlur(sigma=3.))
            if np.random.rand() > 0.5:
                if np.random.rand() > 0.5:
                    T.append(iaa.MedianBlur(k=3))
                else:
                    T.append(iaa.AverageBlur(k=3))
                T.append(iaa.GaussianBlur(sigma=(0., 3.)))
            images = iaa.Sequential(T).augment_images(images)
            return images

        self.t4 = iaa.Lambda(t4)

        self.r5 = [self.NOSE_POINTS]

        def t5(images, random_state, parents, hooks):
            T = []
            iter = np.random.randint(1, 3)
            for i in range(iter):
                T.append(iaa.MinPooling(kernel_size=5, keep_size=True))
                T.append(iaa.GaussianBlur(sigma=3.))
            if np.random.rand() > 0.5:
                if np.random.rand() > 0.5:
                    T.append(iaa.MedianBlur(k=3))
                else:
                    T.append(iaa.AverageBlur(k=3))
                T.append(iaa.GaussianBlur(sigma=(0., 3.)))
            images = iaa.Sequential(T).augment_images(images)
            return images

        self.t5 = iaa.Lambda(t5)

        self.r6 = [
            self.LEFT_EYE_POINTS + self.RIGHT_EYE_POINTS + self.NOSE_POINTS
        ]

        def t6(images, random_state, parents, hooks):
            T = []
            iter = np.random.randint(1, 3)
            for i in range(iter):
                T.append(iaa.MinPooling(kernel_size=5, keep_size=True))
                T.append(iaa.GaussianBlur(sigma=3.))
            if np.random.rand() > 0.5:
                if np.random.rand() > 0.5:
                    T.append(iaa.MedianBlur(k=3))
                else:
                    T.append(iaa.AverageBlur(k=3))
                T.append(iaa.GaussianBlur(sigma=(0., 3.)))
            images = iaa.Sequential(T).augment_images(images)
            return images

        self.t6 = iaa.Lambda(t6)

        self.choice = [self.r1, self.r2, self.r3, self.r4, self.r5, self.r6]
        self.T_choice = [self.t1, self.t2, self.t3, self.t4, self.t5, self.t6]

        self.detector = dlib.get_frontal_face_detector()

        self.PREDICTOR_PATH = "shape_predictor_68_face_landmarks.dat"
        self.predictor = dlib.shape_predictor(self.PREDICTOR_PATH)
Example #17
def create_augmenters(height, width, height_augmentable, width_augmentable,
                      only_augmenters):
    def lambda_func_images(images, random_state, parents, hooks):
        return images

    def lambda_func_heatmaps(heatmaps, random_state, parents, hooks):
        return heatmaps

    def lambda_func_keypoints(keypoints, random_state, parents, hooks):
        return keypoints

    def assertlambda_func_images(images, random_state, parents, hooks):
        return True

    def assertlambda_func_heatmaps(heatmaps, random_state, parents, hooks):
        return True

    def assertlambda_func_keypoints(keypoints, random_state, parents, hooks):
        return True

    augmenters_meta = [
        iaa.Sequential([iaa.Noop(), iaa.Noop()],
                       random_order=False,
                       name="Sequential_2xNoop"),
        iaa.Sequential([iaa.Noop(), iaa.Noop()],
                       random_order=True,
                       name="Sequential_2xNoop_random_order"),
        iaa.SomeOf((1, 3),
                   [iaa.Noop(), iaa.Noop(), iaa.Noop()],
                   random_order=False,
                   name="SomeOf_3xNoop"),
        iaa.SomeOf((1, 3),
                   [iaa.Noop(), iaa.Noop(), iaa.Noop()],
                   random_order=True,
                   name="SomeOf_3xNoop_random_order"),
        iaa.OneOf([iaa.Noop(), iaa.Noop(), iaa.Noop()], name="OneOf_3xNoop"),
        iaa.Sometimes(0.5, iaa.Noop(), name="Sometimes_Noop"),
        iaa.WithChannels([1, 2], iaa.Noop(), name="WithChannels_1_and_2_Noop"),
        iaa.Noop(name="Noop"),
        iaa.Lambda(func_images=lambda_func_images,
                   func_heatmaps=lambda_func_heatmaps,
                   func_keypoints=lambda_func_keypoints,
                   name="Lambda"),
        iaa.AssertLambda(func_images=assertlambda_func_images,
                         func_heatmaps=assertlambda_func_heatmaps,
                         func_keypoints=assertlambda_func_keypoints,
                         name="AssertLambda"),
        iaa.AssertShape((None, height_augmentable, width_augmentable, None),
                        name="AssertShape"),
        iaa.ChannelShuffle(0.5, name="ChannelShuffle")
    ]
    augmenters_arithmetic = [
        iaa.Add((-10, 10), name="Add"),
        iaa.AddElementwise((-10, 10), name="AddElementwise"),
        #iaa.AddElementwise((-500, 500), name="AddElementwise"),
        iaa.AdditiveGaussianNoise(scale=(5, 10), name="AdditiveGaussianNoise"),
        iaa.AdditiveLaplaceNoise(scale=(5, 10), name="AdditiveLaplaceNoise"),
        iaa.AdditivePoissonNoise(lam=(1, 5), name="AdditivePoissonNoise"),
        iaa.Multiply((0.5, 1.5), name="Multiply"),
        iaa.MultiplyElementwise((0.5, 1.5), name="MultiplyElementwise"),
        iaa.Dropout((0.01, 0.05), name="Dropout"),
        iaa.CoarseDropout((0.01, 0.05),
                          size_percent=(0.01, 0.1),
                          name="CoarseDropout"),
        iaa.ReplaceElementwise((0.01, 0.05), (0, 255),
                               name="ReplaceElementwise"),
        #iaa.ReplaceElementwise((0.95, 0.99), (0, 255), name="ReplaceElementwise"),
        iaa.SaltAndPepper((0.01, 0.05), name="SaltAndPepper"),
        iaa.ImpulseNoise((0.01, 0.05), name="ImpulseNoise"),
        iaa.CoarseSaltAndPepper((0.01, 0.05),
                                size_percent=(0.01, 0.1),
                                name="CoarseSaltAndPepper"),
        iaa.Salt((0.01, 0.05), name="Salt"),
        iaa.CoarseSalt((0.01, 0.05),
                       size_percent=(0.01, 0.1),
                       name="CoarseSalt"),
        iaa.Pepper((0.01, 0.05), name="Pepper"),
        iaa.CoarsePepper((0.01, 0.05),
                         size_percent=(0.01, 0.1),
                         name="CoarsePepper"),
        iaa.Invert(0.1, name="Invert"),
        # ContrastNormalization
        iaa.JpegCompression((50, 99), name="JpegCompression")
    ]
    augmenters_blend = [
        iaa.Alpha((0.01, 0.99), iaa.Noop(), name="Alpha"),
        iaa.AlphaElementwise((0.01, 0.99), iaa.Noop(),
                             name="AlphaElementwise"),
        iaa.SimplexNoiseAlpha(iaa.Noop(), name="SimplexNoiseAlpha"),
        iaa.FrequencyNoiseAlpha((-2.0, 2.0),
                                iaa.Noop(),
                                name="FrequencyNoiseAlpha")
    ]
    augmenters_blur = [
        iaa.GaussianBlur(sigma=(1.0, 5.0), name="GaussianBlur"),
        iaa.AverageBlur(k=(3, 11), name="AverageBlur"),
        iaa.MedianBlur(k=(3, 11), name="MedianBlur"),
        iaa.BilateralBlur(d=(3, 11), name="BilateralBlur"),
        iaa.MotionBlur(k=(3, 11), name="MotionBlur")
    ]
    augmenters_color = [
        # InColorspace (deprecated)
        iaa.WithColorspace(to_colorspace="HSV",
                           children=iaa.Noop(),
                           name="WithColorspace"),
        iaa.WithHueAndSaturation(children=iaa.Noop(),
                                 name="WithHueAndSaturation"),
        iaa.MultiplyHueAndSaturation((0.8, 1.2),
                                     name="MultiplyHueAndSaturation"),
        iaa.MultiplyHue((-1.0, 1.0), name="MultiplyHue"),
        iaa.MultiplySaturation((0.8, 1.2), name="MultiplySaturation"),
        iaa.AddToHueAndSaturation((-10, 10), name="AddToHueAndSaturation"),
        iaa.AddToHue((-10, 10), name="AddToHue"),
        iaa.AddToSaturation((-10, 10), name="AddToSaturation"),
        iaa.ChangeColorspace(to_colorspace="HSV", name="ChangeColorspace"),
        iaa.Grayscale((0.01, 0.99), name="Grayscale"),
        iaa.KMeansColorQuantization((2, 16), name="KMeansColorQuantization"),
        iaa.UniformColorQuantization((2, 16), name="UniformColorQuantization")
    ]
    augmenters_contrast = [
        iaa.GammaContrast(gamma=(0.5, 2.0), name="GammaContrast"),
        iaa.SigmoidContrast(gain=(5, 20),
                            cutoff=(0.25, 0.75),
                            name="SigmoidContrast"),
        iaa.LogContrast(gain=(0.7, 1.0), name="LogContrast"),
        iaa.LinearContrast((0.5, 1.5), name="LinearContrast"),
        iaa.AllChannelsCLAHE(clip_limit=(2, 10),
                             tile_grid_size_px=(3, 11),
                             name="AllChannelsCLAHE"),
        iaa.CLAHE(clip_limit=(2, 10),
                  tile_grid_size_px=(3, 11),
                  to_colorspace="HSV",
                  name="CLAHE"),
        iaa.AllChannelsHistogramEqualization(
            name="AllChannelsHistogramEqualization"),
        iaa.HistogramEqualization(to_colorspace="HSV",
                                  name="HistogramEqualization"),
    ]
    augmenters_convolutional = [
        iaa.Convolve(np.float32([[0, 0, 0], [0, 1, 0], [0, 0, 0]]),
                     name="Convolve_3x3"),
        iaa.Sharpen(alpha=(0.01, 0.99), lightness=(0.5, 2), name="Sharpen"),
        iaa.Emboss(alpha=(0.01, 0.99), strength=(0, 2), name="Emboss"),
        iaa.EdgeDetect(alpha=(0.01, 0.99), name="EdgeDetect"),
        iaa.DirectedEdgeDetect(alpha=(0.01, 0.99), name="DirectedEdgeDetect")
    ]
    augmenters_edges = [iaa.Canny(alpha=(0.01, 0.99), name="Canny")]
    augmenters_flip = [
        iaa.Fliplr(1.0, name="Fliplr"),
        iaa.Flipud(1.0, name="Flipud")
    ]
    augmenters_geometric = [
        iaa.Affine(scale=(0.9, 1.1),
                   translate_percent={
                       "x": (-0.05, 0.05),
                       "y": (-0.05, 0.05)
                   },
                   rotate=(-10, 10),
                   shear=(-10, 10),
                   order=0,
                   mode="constant",
                   cval=(0, 255),
                   name="Affine_order_0_constant"),
        iaa.Affine(scale=(0.9, 1.1),
                   translate_percent={
                       "x": (-0.05, 0.05),
                       "y": (-0.05, 0.05)
                   },
                   rotate=(-10, 10),
                   shear=(-10, 10),
                   order=1,
                   mode="constant",
                   cval=(0, 255),
                   name="Affine_order_1_constant"),
        iaa.Affine(scale=(0.9, 1.1),
                   translate_percent={
                       "x": (-0.05, 0.05),
                       "y": (-0.05, 0.05)
                   },
                   rotate=(-10, 10),
                   shear=(-10, 10),
                   order=3,
                   mode="constant",
                   cval=(0, 255),
                   name="Affine_order_3_constant"),
        iaa.Affine(scale=(0.9, 1.1),
                   translate_percent={
                       "x": (-0.05, 0.05),
                       "y": (-0.05, 0.05)
                   },
                   rotate=(-10, 10),
                   shear=(-10, 10),
                   order=1,
                   mode="edge",
                   cval=(0, 255),
                   name="Affine_order_1_edge"),
        iaa.Affine(scale=(0.9, 1.1),
                   translate_percent={
                       "x": (-0.05, 0.05),
                       "y": (-0.05, 0.05)
                   },
                   rotate=(-10, 10),
                   shear=(-10, 10),
                   order=1,
                   mode="constant",
                   cval=(0, 255),
                   backend="skimage",
                   name="Affine_order_1_constant_skimage"),
        # TODO AffineCv2
        iaa.PiecewiseAffine(scale=(0.01, 0.05),
                            nb_rows=4,
                            nb_cols=4,
                            order=1,
                            mode="constant",
                            name="PiecewiseAffine_4x4_order_1_constant"),
        iaa.PiecewiseAffine(scale=(0.01, 0.05),
                            nb_rows=4,
                            nb_cols=4,
                            order=0,
                            mode="constant",
                            name="PiecewiseAffine_4x4_order_0_constant"),
        iaa.PiecewiseAffine(scale=(0.01, 0.05),
                            nb_rows=4,
                            nb_cols=4,
                            order=1,
                            mode="edge",
                            name="PiecewiseAffine_4x4_order_1_edge"),
        iaa.PiecewiseAffine(scale=(0.01, 0.05),
                            nb_rows=8,
                            nb_cols=8,
                            order=1,
                            mode="constant",
                            name="PiecewiseAffine_8x8_order_1_constant"),
        iaa.PerspectiveTransform(scale=(0.01, 0.05),
                                 keep_size=False,
                                 name="PerspectiveTransform"),
        iaa.PerspectiveTransform(scale=(0.01, 0.05),
                                 keep_size=True,
                                 name="PerspectiveTransform_keep_size"),
        iaa.ElasticTransformation(
            alpha=(1, 10),
            sigma=(0.5, 1.5),
            order=0,
            mode="constant",
            cval=0,
            name="ElasticTransformation_order_0_constant"),
        iaa.ElasticTransformation(
            alpha=(1, 10),
            sigma=(0.5, 1.5),
            order=1,
            mode="constant",
            cval=0,
            name="ElasticTransformation_order_1_constant"),
        iaa.ElasticTransformation(
            alpha=(1, 10),
            sigma=(0.5, 1.5),
            order=1,
            mode="nearest",
            cval=0,
            name="ElasticTransformation_order_1_nearest"),
        iaa.ElasticTransformation(
            alpha=(1, 10),
            sigma=(0.5, 1.5),
            order=1,
            mode="reflect",
            cval=0,
            name="ElasticTransformation_order_1_reflect"),
        iaa.Rot90((1, 3), keep_size=False, name="Rot90"),
        iaa.Rot90((1, 3), keep_size=True, name="Rot90_keep_size")
    ]
    augmenters_pooling = [
        iaa.AveragePooling(kernel_size=(1, 16),
                           keep_size=False,
                           name="AveragePooling"),
        iaa.AveragePooling(kernel_size=(1, 16),
                           keep_size=True,
                           name="AveragePooling_keep_size"),
        iaa.MaxPooling(kernel_size=(1, 16), keep_size=False,
                       name="MaxPooling"),
        iaa.MaxPooling(kernel_size=(1, 16),
                       keep_size=True,
                       name="MaxPooling_keep_size"),
        iaa.MinPooling(kernel_size=(1, 16), keep_size=False,
                       name="MinPooling"),
        iaa.MinPooling(kernel_size=(1, 16),
                       keep_size=True,
                       name="MinPooling_keep_size"),
        iaa.MedianPooling(kernel_size=(1, 16),
                          keep_size=False,
                          name="MedianPooling"),
        iaa.MedianPooling(kernel_size=(1, 16),
                          keep_size=True,
                          name="MedianPooling_keep_size")
    ]
    augmenters_segmentation = [
        iaa.Superpixels(p_replace=(0.05, 1.0),
                        n_segments=(10, 100),
                        max_size=64,
                        interpolation="cubic",
                        name="Superpixels_max_size_64_cubic"),
        iaa.Superpixels(p_replace=(0.05, 1.0),
                        n_segments=(10, 100),
                        max_size=64,
                        interpolation="linear",
                        name="Superpixels_max_size_64_linear"),
        iaa.Superpixels(p_replace=(0.05, 1.0),
                        n_segments=(10, 100),
                        max_size=128,
                        interpolation="linear",
                        name="Superpixels_max_size_128_linear"),
        iaa.Superpixels(p_replace=(0.05, 1.0),
                        n_segments=(10, 100),
                        max_size=224,
                        interpolation="linear",
                        name="Superpixels_max_size_224_linear"),
        iaa.UniformVoronoi(n_points=(250, 1000), name="UniformVoronoi"),
        iaa.RegularGridVoronoi(n_rows=(16, 31),
                               n_cols=(16, 31),
                               name="RegularGridVoronoi"),
        iaa.RelativeRegularGridVoronoi(n_rows_frac=(0.07, 0.14),
                                       n_cols_frac=(0.07, 0.14),
                                       name="RelativeRegularGridVoronoi"),
    ]
    augmenters_size = [
        iaa.Resize((0.8, 1.2), interpolation="nearest", name="Resize_nearest"),
        iaa.Resize((0.8, 1.2), interpolation="linear", name="Resize_linear"),
        iaa.Resize((0.8, 1.2), interpolation="cubic", name="Resize_cubic"),
        iaa.CropAndPad(percent=(-0.2, 0.2),
                       pad_mode="constant",
                       pad_cval=(0, 255),
                       keep_size=False,
                       name="CropAndPad"),
        iaa.CropAndPad(percent=(-0.2, 0.2),
                       pad_mode="edge",
                       pad_cval=(0, 255),
                       keep_size=False,
                       name="CropAndPad_edge"),
        iaa.CropAndPad(percent=(-0.2, 0.2),
                       pad_mode="constant",
                       pad_cval=(0, 255),
                       name="CropAndPad_keep_size"),
        iaa.Pad(percent=(0.05, 0.2),
                pad_mode="constant",
                pad_cval=(0, 255),
                keep_size=False,
                name="Pad"),
        iaa.Pad(percent=(0.05, 0.2),
                pad_mode="edge",
                pad_cval=(0, 255),
                keep_size=False,
                name="Pad_edge"),
        iaa.Pad(percent=(0.05, 0.2),
                pad_mode="constant",
                pad_cval=(0, 255),
                name="Pad_keep_size"),
        iaa.Crop(percent=(0.05, 0.2), keep_size=False, name="Crop"),
        iaa.Crop(percent=(0.05, 0.2), name="Crop_keep_size"),
        iaa.PadToFixedSize(width=width + 10,
                           height=height + 10,
                           pad_mode="constant",
                           pad_cval=(0, 255),
                           name="PadToFixedSize"),
        iaa.CropToFixedSize(width=width - 10,
                            height=height - 10,
                            name="CropToFixedSize"),
        iaa.KeepSizeByResize(iaa.CropToFixedSize(height=height - 10,
                                                 width=width - 10),
                             interpolation="nearest",
                             name="KeepSizeByResize_CropToFixedSize_nearest"),
        iaa.KeepSizeByResize(iaa.CropToFixedSize(height=height - 10,
                                                 width=width - 10),
                             interpolation="linear",
                             name="KeepSizeByResize_CropToFixedSize_linear"),
        iaa.KeepSizeByResize(iaa.CropToFixedSize(height=height - 10,
                                                 width=width - 10),
                             interpolation="cubic",
                             name="KeepSizeByResize_CropToFixedSize_cubic"),
    ]
    augmenters_weather = [
        iaa.FastSnowyLandscape(lightness_threshold=(100, 255),
                               lightness_multiplier=(1.0, 4.0),
                               name="FastSnowyLandscape"),
        iaa.Clouds(name="Clouds"),
        iaa.Fog(name="Fog"),
        iaa.CloudLayer(intensity_mean=(196, 255),
                       intensity_freq_exponent=(-2.5, -2.0),
                       intensity_coarse_scale=10,
                       alpha_min=0,
                       alpha_multiplier=(0.25, 0.75),
                       alpha_size_px_max=(2, 8),
                       alpha_freq_exponent=(-2.5, -2.0),
                       sparsity=(0.8, 1.0),
                       density_multiplier=(0.5, 1.0),
                       name="CloudLayer"),
        iaa.Snowflakes(name="Snowflakes"),
        iaa.SnowflakesLayer(density=(0.005, 0.075),
                            density_uniformity=(0.3, 0.9),
                            flake_size=(0.2, 0.7),
                            flake_size_uniformity=(0.4, 0.8),
                            angle=(-30, 30),
                            speed=(0.007, 0.03),
                            blur_sigma_fraction=(0.0001, 0.001),
                            name="SnowflakesLayer")
    ]

    augmenters = (augmenters_meta + augmenters_arithmetic + augmenters_blend +
                  augmenters_blur + augmenters_color + augmenters_contrast +
                  augmenters_convolutional + augmenters_edges +
                  augmenters_flip + augmenters_geometric + augmenters_pooling +
                  augmenters_segmentation + augmenters_size +
                  augmenters_weather)

    if only_augmenters is not None:
        augmenters_reduced = []
        for augmenter in augmenters:
            if any([
                    re.search(pattern, augmenter.name)
                    for pattern in only_augmenters
            ]):
                augmenters_reduced.append(augmenter)
        augmenters = augmenters_reduced

    return augmenters
Example #18

def func_images(images, random_state, parents, hooks):
    result = []
    for image in images:
        image_aug = augment_slice_thickness(image, max_r=8)
        result.append(image_aug)

    return result


def func_keypoints(keypoints_on_images, random_state, parents, hooks):
    return keypoints_on_images


slice_thickness_augmenter = iaa.Lambda(func_images=func_images,
                                       func_keypoints=func_keypoints)


def adjust_input_image_size(img, input_size):
    s = img.shape
    new_s = [max(d, input_size[j]) for j, d in enumerate(s)]
    if sum(new_s) != sum(s):
        img = pad_image_to_size(img,
                                img_size=input_size[0:2],
                                mode='constant',
                                loc=[1, 2, 1])
    return img


def create_heatmap(lmap, y, sigma=1.5, downsample=2):
    lmap[y, :] = 1
Example #19
    # Augmenter that changes the contrast of images using a unique formula (linear).
    # Multiplier for linear function is between lo and hi, sampled randomly per image
    # For percent of all images values are sampled independently per channel.
    "Linear_Contrast":
    lambda lo, hi, percent: iaa.LinearContrast((lo, hi), per_channel=percent),

    # Augmenter that changes the contrast of images using a unique formula (using log).
    # Multiplier for log function is between lo and hi, sampled randomly per image.
    # For percent of all images values are sampled independently per channel.
    # Values around 1.0 lead to contrast-adjusted images. Values above 1.0 quickly lead to partially broken
    # images due to exceeding the datatype's value range.
    "Log_Contrast":
    lambda lo, hi, percent: iaa.LogContrast((lo, hi), per_channel=percent),

    # Augmenter that changes the contrast of images using a unique formula (sigmoid).
    # Multiplier for sigmoid function is between lo and hi, sampled randomly per image. c_lo and c_hi decide the
    # cutoff value that shifts the sigmoid function in horizontal direction (Higher values mean that the switch
    # from dark to light pixels happens later, i.e. the pixels will remain darker).
    # For percent of all images values are sampled independently per channel:
    "Sigmoid_Contrast":
    lambda lo, hi, c_lo, c_hi, percent: iaa.SigmoidContrast(
        (lo, hi), (c_lo, c_hi), per_channel=percent),

    # Augmenter that calls a custom (lambda) function for each batch of input images.
    # Extracts Canny Edges from images (refer to description in CO)
    # Good default values for min and max are 100 and 200
    'Custom_Canny_Edges':
    lambda min_val, max_val: iaa.Lambda(func_images=CO.Edges(
        min_value=min_val, max_value=max_val)),
}
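Each value in the dictionary above is a factory: calling it with the listed parameters returns a configured augmenter. A self-contained miniature of the same pattern (the real dictionary name is not shown in the truncated snippet):

import imgaug.augmenters as iaa

contrast_factories = {
    "Linear_Contrast": lambda lo, hi, percent: iaa.LinearContrast((lo, hi), per_channel=percent),
    "Log_Contrast": lambda lo, hi, percent: iaa.LogContrast((lo, hi), per_channel=percent),
}
aug = contrast_factories["Linear_Contrast"](0.75, 1.25, 0.5)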
Example #20
        [
        iaa.OneOf(
            [   
            # Blur each image using a median over neighbourhoods that have a random size between 3x3 and 7x7
            sometimes(iaa.MedianBlur(k=(3, 7))),
            # blur images using gaussian kernels with random value (sigma) from the interval [a, b]
            sometimes(iaa.GaussianBlur(sigma=(0.0, 1.0))),
            sometimes(iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05 * 255), per_channel=0.5))
            ]
        ),
        iaa.Sequential(
            [
            sometimes(iaa.AddToHue((-8, 8))),
            sometimes(iaa.AddToSaturation((-20, 20))),
            sometimes(iaa.AddToBrightness((-26, 26))),
            sometimes(iaa.Lambda(func_images = add_to_contrast))
            ], random_order=True)
        ], random_order=True)
#%%
for p in range(iterations): # how many times to apply random augmentations
    for idx in trange(len(img_path), desc='Augmenting Dataset (iteration {} of {})'.format(p+1, iterations)):
        
        img = cv2.imread(img_path[idx])
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        filepath = xml_path[idx]
        
        full_dict = xmltodict.parse(open(filepath, 'rb'))
        
        # Extracting the coords and class names from xml file
        names = []
        coords = []
Example #21
    def __get_augmentation(self, mode, rng):
        if mode == "train":
            shape_augs = [
                # * order = ``0`` -> ``cv2.INTER_NEAREST``
                # * order = ``1`` -> ``cv2.INTER_LINEAR``
                # * order = ``2`` -> ``cv2.INTER_CUBIC``
                # * order = ``3`` -> ``cv2.INTER_CUBIC``
                # * order = ``4`` -> ``cv2.INTER_CUBIC``
                # ! for pannuke v0, no rotation or translation, just flip to avoid mirror padding
                iaa.Affine(
                    # scale images to 80-120% of their size, individually per axis
                    scale={
                        "x": (0.8, 1.2),
                        "y": (0.8, 1.2)
                    },
                    # translate by -A to +A percent (per axis)
                    translate_percent={
                        "x": (-0.01, 0.01),
                        "y": (-0.01, 0.01)
                    },
                    shear=(-5, 5),  # shear by -5 to +5 degrees
                    rotate=(-179, 179),  # rotate by -179 to +179 degrees
                    order=0,  # use nearest neighbour
                    backend="cv2",  # opencv for fast processing
                    seed=rng,
                ),
                # set position to 'center' for center crop
                # else 'uniform' for random crop
                iaa.CropToFixedSize(self.input_shape[0],
                                    self.input_shape[1],
                                    position="center"),
                iaa.Fliplr(0.5, seed=rng),
                iaa.Flipud(0.5, seed=rng),
            ]

            input_augs = [
                iaa.OneOf([
                    iaa.Lambda(
                        seed=rng,
                        func_images=lambda *args: gaussian_blur(*args,
                                                                max_ksize=3),
                    ),
                    iaa.Lambda(
                        seed=rng,
                        func_images=lambda *args: median_blur(*args,
                                                              max_ksize=3),
                    ),
                    iaa.AdditiveGaussianNoise(loc=0,
                                              scale=(0.0, 0.05 * 255),
                                              per_channel=0.5),
                ]),
                iaa.Sequential(
                    [
                        iaa.Lambda(
                            seed=rng,
                            func_images=lambda *args: add_to_hue(
                                *args, range=(-8, 8)),
                        ),
                        iaa.Lambda(
                            seed=rng,
                            func_images=lambda *args: add_to_saturation(
                                *args, range=(-0.2, 0.2)),
                        ),
                        iaa.Lambda(
                            seed=rng,
                            func_images=lambda *args: add_to_brightness(
                                *args, range=(-26, 26)),
                        ),
                        iaa.Lambda(
                            seed=rng,
                            func_images=lambda *args: add_to_contrast(
                                *args, range=(0.75, 1.25)),
                        ),
                    ],
                    random_order=True,
                ),
            ]
        elif mode == "valid":
            shape_augs = [
                # set position to 'center' for center crop
                # else 'uniform' for random crop
                iaa.CropToFixedSize(self.input_shape[0],
                                    self.input_shape[1],
                                    position="center")
            ]
            input_augs = []

        return shape_augs, input_augs
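The helpers gaussian_blur, median_blur, add_to_hue, add_to_saturation, add_to_brightness and add_to_contrast are defined elsewhere in that code base; as an illustration of the interface they must provide, a hypothetical add_to_contrast matching the call above:

import numpy as np

def add_to_contrast(images, random_state, parents, hooks, range=(0.75, 1.25)):
    # Hypothetical sketch: scale pixel values around the per-image mean by a
    # factor sampled from `range`.
    out = []
    for img in images:
        factor = random_state.uniform(range[0], range[1])
        mean = img.mean()
        img_new = np.clip((img.astype(np.float32) - mean) * factor + mean, 0, 255)
        out.append(img_new.astype(img.dtype))
    return out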
Example #22
    for img in image:
        new_img.append(
            cv2.copyMakeBorder(img, 14, 13, 14, 13, cv2.BORDER_REFLECT))

    return np.stack(new_img)


def _reflective_padding_228(image, random_state, parents, hooks):
    new_img = []
    for img in image:
        double = cv2.resize(img, (202, 202), interpolation=cv2.INTER_CUBIC)
        new_img.append(
            cv2.copyMakeBorder(double, 13, 13, 13, 13, cv2.BORDER_REFLECT))

    return np.stack(new_img)


channel_augmenter = iaa.Lambda(func_images=_combination, func_keypoints=None)
reflect_resize_128 = iaa.Lambda(func_images=_reflective_padding_128,
                                func_keypoints=None)
reflect_resize_228 = iaa.Lambda(func_images=_reflective_padding_228,
                                func_keypoints=None)

# Collect all into a dictionary
defined = {
    'intensity_seq': intensity_seq,
    'channel_augmenter': channel_augmenter,
    'resize_128': reflect_resize_128,
    'resize_228': reflect_resize_228
}
Example #23
    weight = w0 * np.exp(
        -np.divide(np.power(np.min(distance_map, axis=-1), 2), 2 * sigma**2))

    # Load into the weighted mask the original mask at the points of dilation
    loc = all_dil_contours_together == 0
    weight[loc] = img[loc]
    # This is for debugging only, will show original contour
    # _, original_contours, _ = cv2.findContours(img.copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    # all_contours_together = cv2.drawContours(img_empty.copy(), original_contours, -1, (0, 0), 0)
    # weight[all_contours_together == 0] = 0

    return weight


def _smooth_wrapper(images, random_state, parents, hooks):
    new_img = []
    for img in images:
        if img.sum() == 0:
            new_img.append(img)
            continue
        img = np.expand_dims(get_smoothed_mask(img.squeeze()), 2)
        new_img.append(img)

    return np.array(new_img)


mask_smoother_augmenter = iaa.Lambda(func_images=_smooth_wrapper,
                                     func_keypoints=None)

# Collect all into a dictionary
defined = {'mask_smooth': mask_smoother_augmenter}
Example #24
def main():
    augseq = iaa.Sequential(
        [iaa.Fliplr(0.5),
         iaa.CoarseDropout(p=0.1, size_percent=0.1)])

    def func_images(images, random_state, parents, hooks):
        time.sleep(0.2)
        return images

    def func_heatmaps(heatmaps, random_state, parents, hooks):
        return heatmaps

    def func_keypoints(keypoints_on_images, random_state, parents, hooks):
        return keypoints_on_images

    augseq_slow = iaa.Sequential([
        iaa.Fliplr(0.5),
        iaa.Lambda(func_images=func_images,
                   func_heatmaps=func_heatmaps,
                   func_keypoints=func_keypoints)
    ])

    print("------------------")
    print("augseq.augment_batches(batches, background=True)")
    print("------------------")
    batches = list(load_images())
    batches_aug = augseq.augment_batches(batches, background=True)
    images_aug = []
    keypoints_aug = []
    for batch_aug in batches_aug:
        images_aug.append(batch_aug.images_aug)
        keypoints_aug.append(batch_aug.keypoints_aug)
    ia.imshow(draw_grid(images_aug, keypoints_aug))

    print("------------------")
    print("augseq.augment_batches(batches, background=True) -> only images")
    print("------------------")
    batches = list(load_images())
    batches = [batch.images for batch in batches]
    batches_aug = augseq.augment_batches(batches, background=True)
    images_aug = []
    keypoints_aug = None
    for batch_aug in batches_aug:
        images_aug.append(batch_aug)
    ia.imshow(draw_grid(images_aug, keypoints_aug))

    print("------------------")
    print("BackgroundAugmenter")
    print("------------------")
    batch_loader = ia.BatchLoader(load_images)
    bg_augmenter = ia.BackgroundAugmenter(batch_loader, augseq)
    images_aug = []
    keypoints_aug = []
    while True:
        print("Next batch...")
        batch = bg_augmenter.get_batch()
        if batch is None:
            print("Finished.")
            break
        images_aug.append(batch.images_aug)
        keypoints_aug.append(batch.keypoints_aug)
    ia.imshow(draw_grid(images_aug, keypoints_aug))

    print("------------------")
    print("BackgroundAugmenter with generator in BL")
    print("------------------")
    batch_loader = ia.BatchLoader(load_images())
    bg_augmenter = ia.BackgroundAugmenter(batch_loader, augseq)
    images_aug = []
    keypoints_aug = []
    while True:
        print("Next batch...")
        batch = bg_augmenter.get_batch()
        if batch is None:
            print("Finished.")
            break
        images_aug.append(batch.images_aug)
        keypoints_aug.append(batch.keypoints_aug)
    ia.imshow(draw_grid(images_aug, keypoints_aug))

    print("------------------")
    print("Long running BackgroundAugmenter at BL-queue_size=12")
    print("------------------")
    batch_loader = ia.BatchLoader(load_images(n_batches=1000), queue_size=12)
    bg_augmenter = ia.BackgroundAugmenter(batch_loader, augseq)
    i = 0
    while True:
        if i % 100 == 0:
            print("batch=%d..." % (i, ))
        batch = bg_augmenter.get_batch()
        if batch is None:
            print("Finished.")
            break
        i += 1

    print("------------------")
    print("Long running BackgroundAugmenter at BL-queue_size=2")
    print("------------------")
    batch_loader = ia.BatchLoader(load_images(n_batches=1000), queue_size=2)
    bg_augmenter = ia.BackgroundAugmenter(batch_loader, augseq)
    i = 0
    while True:
        if i % 100 == 0:
            print("batch=%d..." % (i, ))
        batch = bg_augmenter.get_batch()
        if batch is None:
            print("Finished.")
            break
        i += 1

    print("------------------")
    print("Long running BackgroundAugmenter (slow loading)")
    print("------------------")
    batch_loader = ia.BatchLoader(load_images(n_batches=100, sleep=0.2))
    bg_augmenter = ia.BackgroundAugmenter(batch_loader, augseq)
    i = 0
    while True:
        if i % 10 == 0:
            print("batch=%d..." % (i, ))
        batch = bg_augmenter.get_batch()
        if batch is None:
            print("Finished.")
            break
        i += 1

    print("------------------")
    print("Long running BackgroundAugmenter (slow aug) at BL-queue_size=12")
    print("------------------")
    batch_loader = ia.BatchLoader(load_images(n_batches=100), queue_size=12)
    bg_augmenter = ia.BackgroundAugmenter(batch_loader, augseq_slow)
    i = 0
    while True:
        if i % 10 == 0:
            print("batch=%d..." % (i, ))
        batch = bg_augmenter.get_batch()
        if batch is None:
            print("Finished.")
            break
        i += 1

    print("------------------")
    print("Long running BackgroundAugmenter (slow aug) at BL-queue_size=2")
    print("------------------")
    batch_loader = ia.BatchLoader(load_images(n_batches=100), queue_size=2)
    bg_augmenter = ia.BackgroundAugmenter(batch_loader, augseq_slow)
    i = 0
    while True:
        if i % 10 == 0:
            print("batch=%d..." % (i, ))
        batch = bg_augmenter.get_batch()
        if batch is None:
            print("Finished.")
            break
        i += 1
Example #25
def train(model, dataset_dir, subset):
    """Train the model."""
    # Training dataset.
    dataset_train = MonusegDataset()
    dataset_train.load_monuseg(dataset_dir, subset)
    dataset_train.prepare()
    dataset_train.read_data_and_mask_arr()

    # Validation dataset
    dataset_val = MonusegDataset()
    dataset_val.load_monuseg(dataset_dir, "val")
    dataset_val.prepare()
    dataset_val.read_data_and_mask_arr()

    # Image augmentation
    # http://imgaug.readthedocs.io/en/latest/source/augmenters.html
    def image_channel_shuffle(image):
        img_ = image.copy()
        r, g, b = img_[:, :, 0], img_[:, :, 1], img_[:, :, 2]
        idx = [0, 1, 2]
        np.random.shuffle(idx)
        img_[:, :, idx[0]], img_[:, :, idx[1]], img_[:, :, idx[2]] = r, g, b
        return img_

    def img_func(images, random_state, parents, hooks):
        # shuffle the color channels of every image in the batch
        for i, img in enumerate(images):
            images[i] = image_channel_shuffle(img)
        return images

    def keypoint_func(keypoints_on_images, random_state, parents, hooks):
        return keypoints_on_images

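    # randomly apply between 0 and 4 of the augmenters below to each image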
    augmentation = iaa.SomeOf((0, 4), [
        iaa.Fliplr(0.5),
        iaa.Flipud(0.5),
        iaa.OneOf([
            iaa.Affine(rotate=90),
            iaa.Affine(rotate=180),
            iaa.Affine(rotate=270)
        ]),
        iaa.Multiply((0.8, 1.3)),
        iaa.Affine(scale=(0.5, 2.0)),
        iaa.Lambda(img_func, keypoint_func)
    ])

    # *** This training schedule is an example. Update to your needs ***

    # If starting from imagenet, train heads only for a bit
    # since they have random weights
    print("Train network heads")
    model.train(dataset_train,
                dataset_val,
                learning_rate=config.LEARNING_RATE,
                epochs=20,
                augmentation=augmentation,
                layers='heads')

    print("Train all layers")
    model.train(dataset_train,
                dataset_val,
                learning_rate=3 * 10e-5,
                epochs=4000,
                augmentation=augmentation,
                layers='all')
Example #26
def main():
    augseq = iaa.Sequential(
        [iaa.Fliplr(0.5),
         iaa.CoarseDropout(p=0.1, size_percent=0.1)])

    def func_images(images, random_state, parents, hooks):
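        # simulate a slow augmenter by sleeping once per batch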
        time.sleep(0.2)
        return images

    def func_heatmaps(heatmaps, random_state, parents, hooks):
        return heatmaps

    def func_keypoints(keypoints_on_images, random_state, parents, hooks):
        return keypoints_on_images

    augseq_slow = iaa.Sequential([
        iaa.Fliplr(0.5),
        iaa.Lambda(func_images=func_images,
                   func_heatmaps=func_heatmaps,
                   func_keypoints=func_keypoints)
    ])

    print("------------------")
    print("augseq.augment_batches(batches, background=True)")
    print("------------------")
    batches = list(load_images())
    batches_aug = augseq.augment_batches(batches, background=True)
    images_aug = []
    keypoints_aug = []
    for batch_aug in batches_aug:
        images_aug.append(batch_aug.images_aug)
        keypoints_aug.append(batch_aug.keypoints_aug)
    ia.imshow(draw_grid(images_aug, keypoints_aug))

    print("------------------")
    print("augseq.augment_batches(batches, background=True) -> only images")
    print("------------------")
    batches = list(load_images())
    batches = [batch.images_unaug for batch in batches]
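    # only raw image arrays are passed here, so augment_batches yields augmented
    # arrays instead of Batch objects and no keypoints are returned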
    batches_aug = augseq.augment_batches(batches, background=True)
    images_aug = []
    keypoints_aug = None
    for batch_aug in batches_aug:
        images_aug.append(batch_aug)
    ia.imshow(draw_grid(images_aug, keypoints_aug))

    print("------------------")
    print("BackgroundAugmenter")
    print("------------------")
    batch_loader = multicore.BatchLoader(load_images)
    bg_augmenter = multicore.BackgroundAugmenter(batch_loader, augseq)
    images_aug = []
    keypoints_aug = []
    while True:
        print("Next batch...")
        batch = bg_augmenter.get_batch()
        if batch is None:
            print("Finished.")
            break
        images_aug.append(batch.images_aug)
        keypoints_aug.append(batch.keypoints_aug)
    ia.imshow(draw_grid(images_aug, keypoints_aug))

    print("------------------")
    print("BackgroundAugmenter with generator in BL")
    print("------------------")
    batch_loader = multicore.BatchLoader(load_images())
    bg_augmenter = multicore.BackgroundAugmenter(batch_loader, augseq)
    images_aug = []
    keypoints_aug = []
    while True:
        print("Next batch...")
        batch = bg_augmenter.get_batch()
        if batch is None:
            print("Finished.")
            break
        images_aug.append(batch.images_aug)
        keypoints_aug.append(batch.keypoints_aug)
    ia.imshow(draw_grid(images_aug, keypoints_aug))

    print("------------------")
    print("Long running BackgroundAugmenter at BL-queue_size=12")
    print("------------------")
    batch_loader = multicore.BatchLoader(load_images(n_batches=1000),
                                         queue_size=12)
    bg_augmenter = multicore.BackgroundAugmenter(batch_loader, augseq)
    i = 0
    while True:
        if i % 100 == 0:
            print("batch=%d..." % (i, ))
        batch = bg_augmenter.get_batch()
        if batch is None:
            print("Finished.")
            break
        i += 1

    print("------------------")
    print("Long running BackgroundAugmenter at BL-queue_size=2")
    print("------------------")
    batch_loader = multicore.BatchLoader(load_images(n_batches=1000),
                                         queue_size=2)
    bg_augmenter = multicore.BackgroundAugmenter(batch_loader, augseq)
    i = 0
    while True:
        if i % 100 == 0:
            print("batch=%d..." % (i, ))
        batch = bg_augmenter.get_batch()
        if batch is None:
            print("Finished.")
            break
        i += 1

    print("------------------")
    print("Long running BackgroundAugmenter (slow loading)")
    print("------------------")
    batch_loader = multicore.BatchLoader(load_images(n_batches=100, sleep=0.2))
    bg_augmenter = multicore.BackgroundAugmenter(batch_loader, augseq)
    i = 0
    while True:
        if i % 10 == 0:
            print("batch=%d..." % (i, ))
        batch = bg_augmenter.get_batch()
        if batch is None:
            print("Finished.")
            break
        i += 1

    print("------------------")
    print("Long running BackgroundAugmenter (slow aug) at BL-queue_size=12")
    print("------------------")
    batch_loader = multicore.BatchLoader(load_images(n_batches=100),
                                         queue_size=12)
    bg_augmenter = multicore.BackgroundAugmenter(batch_loader, augseq_slow)
    i = 0
    while True:
        if i % 10 == 0:
            print("batch=%d..." % (i, ))
        batch = bg_augmenter.get_batch()
        if batch is None:
            print("Finished.")
            break
        i += 1

    print("------------------")
    print("Long running BackgroundAugmenter (slow aug) at BL-queue_size=2")
    print("------------------")
    batch_loader = multicore.BatchLoader(load_images(n_batches=100),
                                         queue_size=2)
    bg_augmenter = multicore.BackgroundAugmenter(batch_loader, augseq_slow)
    i = 0
    while True:
        if i % 10 == 0:
            print("batch=%d..." % (i, ))
        batch = bg_augmenter.get_batch()
        if batch is None:
            print("Finished.")
            break
        i += 1

    for augseq_i in [augseq, augseq_slow]:
        print("------------------")
        print("Many very small runs (batches=1)")
        print("------------------")
        for i in range(100):
            batch_loader = multicore.BatchLoader(load_images(n_batches=1),
                                                 queue_size=100)
            bg_augmenter = multicore.BackgroundAugmenter(
                batch_loader, augseq_i)
            while True:
                batch = bg_augmenter.get_batch()
                if batch is None:
                    print("Finished (%d/%d)." % (i + 1, 100))
                    break

        print("------------------")
        print("Many very small runs (batches=2)")
        print("------------------")
        for i in range(100):
            batch_loader = multicore.BatchLoader(load_images(n_batches=2),
                                                 queue_size=100)
            bg_augmenter = multicore.BackgroundAugmenter(
                batch_loader, augseq_i)
            while True:
                batch = bg_augmenter.get_batch()
                if batch is None:
                    print("Finished (%d/%d)." % (i + 1, 100))
                    break

        print("------------------")
        print("Many very small runs, separate function (batches=1)")
        print("------------------")

        def _augment_small_1():
            batch_loader = multicore.BatchLoader(load_images(n_batches=1),
                                                 queue_size=100)
            bg_augmenter = multicore.BackgroundAugmenter(
                batch_loader, augseq_i)
            i = 0
            while True:
                batch = bg_augmenter.get_batch()
                if batch is None:
                    break
                i += 1

        for i in range(100):
            _augment_small_1()
            print("Finished (%d/%d)." % (i + 1, 100))

        print("------------------")
        print("Many very small runs, separate function (batches=2)")
        print("------------------")

        def _augment_small_2():
            batch_loader = multicore.BatchLoader(load_images(n_batches=2),
                                                 queue_size=100)
            bg_augmenter = multicore.BackgroundAugmenter(
                batch_loader, augseq_i)
            i = 0
            while True:
                batch = bg_augmenter.get_batch()
                if batch is None:
                    break
                i += 1

        for i in range(100):
            _augment_small_2()
            print("Finished (%d/%d)." % (i + 1, 100))

        print("------------------")
        print(
            "Many very small runs, separate function, incomplete fetching (batches=2)"
        )
        print("------------------")

        def _augment_small_3():
            batch_loader = multicore.BatchLoader(load_images(n_batches=2),
                                                 queue_size=100)
            bg_augmenter = multicore.BackgroundAugmenter(
                batch_loader, augseq_i)
            batch = bg_augmenter.get_batch()

        for i in range(100):
            _augment_small_3()
            print("Finished (%d/%d)." % (i + 1, 100))

        print("------------------")
        print(
            "Many very small runs, separate function, incomplete fetching (batches=10)"
        )
        print("------------------")

        def _augment_small_4():
            batch_loader = multicore.BatchLoader(load_images(n_batches=10),
                                                 queue_size=100)
            bg_augmenter = multicore.BackgroundAugmenter(
                batch_loader, augseq_i)
            batch = bg_augmenter.get_batch()
            #bg_augmenter.terminate()

        for i in range(100):
            _augment_small_4()
            print("Finished (%d/%d)." % (i + 1, 100))
Example #27
def main():
    augseq = iaa.Sequential(
        [iaa.Fliplr(0.5),
         iaa.CoarseDropout(p=0.1, size_percent=0.1)])

    def func_images(images, random_state, parents, hooks):
        time.sleep(0.2)
        return images

    def func_heatmaps(heatmaps, random_state, parents, hooks):
        return heatmaps

    def func_keypoints(keypoints_on_images, random_state, parents, hooks):
        return keypoints_on_images

    augseq_slow = iaa.Sequential([
        iaa.Fliplr(0.5),
        iaa.Lambda(func_images=func_images,
                   func_heatmaps=func_heatmaps,
                   func_keypoints=func_keypoints)
    ])

    print("------------------")
    print(".pool()")
    print("------------------")
    with augseq.pool() as pool:
        time_start = time.time()
        batches = list(load_images())
        batches_aug = pool.map_batches(batches)
        images_aug = []
        keypoints_aug = []
        for batch_aug in batches_aug:
            images_aug.append(batch_aug.images_aug)
            keypoints_aug.append(batch_aug.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start, ))
    # ia.imshow(draw_grid(images_aug, keypoints_aug))

    print("------------------")
    print("Pool.map_batches(batches)")
    print("------------------")
    with multicore.Pool(augseq) as pool:
        time_start = time.time()
        batches = list(load_images())
        batches_aug = pool.map_batches(batches)
        images_aug = []
        keypoints_aug = []
        for batch_aug in batches_aug:
            images_aug.append(batch_aug.images_aug)
            keypoints_aug.append(batch_aug.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start, ))
    # ia.imshow(draw_grid(images_aug, keypoints_aug))

    print("------------------")
    print("Pool.imap_batches(batches)")
    print("------------------")
    with multicore.Pool(augseq) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images())
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start, ))
    # ia.imshow(draw_grid(images_aug, keypoints_aug))

    print("------------------")
    print("Pool.imap_batches(batches, chunksize=32)")
    print("------------------")
    with multicore.Pool(augseq) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=1000),
                                        chunksize=32)
        count = 0
        for batch in batches_aug:
            count += 1
        assert count == 1000
        print("Done in %.4fs" % (time.time() - time_start, ))

    print("------------------")
    print("Pool.imap_batches(batches, chunksize=2)")
    print("------------------")
    with multicore.Pool(augseq) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=1000),
                                        chunksize=2)
        count = 0
        for batch in batches_aug:
            count += 1
        assert count == 1000
        print("Done in %.4fs" % (time.time() - time_start, ))

    print("------------------")
    print("Pool.imap_batches(batches, chunksize=1)")
    print("------------------")
    with multicore.Pool(augseq) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=1000),
                                        chunksize=1)
        count = 0
        for batch in batches_aug:
            count += 1
        assert count == 1000
        print("Done in %.4fs" % (time.time() - time_start, ))

    print("------------------")
    print("Pool.map_batches(batches, chunksize=32)")
    print("------------------")
    with multicore.Pool(augseq) as pool:
        time_start = time.time()
        batches_aug = pool.map_batches(list(load_images(n_batches=1000)),
                                       chunksize=32)
        assert len(batches_aug) == 1000
        print("Done in %.4fs" % (time.time() - time_start, ))

    print("------------------")
    print("Pool.map_batches chunksize with fast aug")
    print("------------------")

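    # compare different process counts and chunksizes for a cheap augmenter,
    # where inter-process overhead matters most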
    def test_fast(processes, chunksize):
        augseq = iaa.Dropout(0.1)
        with multicore.Pool(augseq, processes=processes) as pool:
            batches = list(load_images(n_batches=10000, draw_text=False))
            time_start = time.time()
            batches_aug = pool.map_batches(batches, chunksize=chunksize)
            assert len(batches_aug) == 10000
            print("chunksize=%d, worker=%s, time=%.4fs" %
                  (chunksize, processes, time.time() - time_start))

    test_fast(-4, 1)
    test_fast(1, 1)
    test_fast(None, 1)
    test_fast(1, 4)
    test_fast(None, 4)
    test_fast(1, 32)
    test_fast(None, 32)

    print("------------------")
    print("Pool.imap_batches chunksize with fast aug")
    print("------------------")

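    # same comparison as above, but using the lazy imap_batches() interface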
    def test_fast_imap(processes, chunksize):
        augseq = iaa.Dropout(0.1)
        with multicore.Pool(augseq, processes=processes) as pool:
            time_start = time.time()
            batches_aug = pool.imap_batches(load_images(n_batches=10000,
                                                        draw_text=False),
                                            chunksize=chunksize)
            batches_aug = list(batches_aug)
            assert len(batches_aug) == 10000
            print("chunksize=%d, worker=%s, time=%.4fs" %
                  (chunksize, processes, time.time() - time_start))

    test_fast_imap(-4, 1)
    test_fast_imap(1, 1)
    test_fast_imap(None, 1)
    test_fast_imap(1, 4)
    test_fast_imap(None, 4)
    test_fast_imap(1, 32)
    test_fast_imap(None, 32)

    print("------------------")
    print("Pool.map_batches with computationally expensive aug")
    print("------------------")

    def test_heavy(processes, chunksize):
        augseq_heavy = iaa.PiecewiseAffine(scale=0.2, nb_cols=8, nb_rows=8)
        with multicore.Pool(augseq_heavy, processes=processes) as pool:
            batches = list(load_images(n_batches=500, draw_text=False))
            time_start = time.time()
            batches_aug = pool.map_batches(batches, chunksize=chunksize)
            assert len(batches_aug) == 500
            print("chunksize=%d, worker=%s, time=%.4fs" %
                  (chunksize, processes, time.time() - time_start))

    test_heavy(-4, 1)
    test_heavy(1, 1)
    test_heavy(None, 1)
    test_heavy(1, 4)
    test_heavy(None, 4)
    test_heavy(1, 32)
    test_heavy(None, 32)

    print("------------------")
    print("Pool.imap_batches(batches), slow loading")
    print("------------------")
    with multicore.Pool(augseq) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=100, sleep=0.2))
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start, ))

    print("------------------")
    print("Pool.imap_batches(batches), maxtasksperchild=4")
    print("------------------")
    with multicore.Pool(augseq, maxtasksperchild=4) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=100))
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start, ))
    ia.imshow(draw_grid(images_aug, keypoints_aug))

    print("------------------")
    print("Pool.imap_batches(batches), seed=1")
    print("------------------")
    # color the images of the first worker so we can see in the grids which images belong to one worker
    with PoolWithMarkedWorker(augseq, seed=1) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=4))
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start, ))
    grid_a = draw_grid(images_aug, keypoints_aug)

    with multicore.Pool(augseq, seed=1) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=4))
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start, ))
    grid_b = draw_grid(images_aug, keypoints_aug)

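    # mark the left edge of the second grid in green so both grids can be told apart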
    grid_b[:, 0:2, 0] = 0
    grid_b[:, 0:2, 1] = 255
    grid_b[:, 0:2, 2] = 0
    ia.imshow(np.hstack([grid_a, grid_b]))

    print("------------------")
    print("Pool.imap_batches(batches), seed=None")
    print("------------------")
    with multicore.Pool(augseq, seed=None) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=4))
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start, ))
    grid_a = draw_grid(images_aug, keypoints_aug)

    with multicore.Pool(augseq, seed=None) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=4))
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start, ))
    grid_b = draw_grid(images_aug, keypoints_aug)

    ia.imshow(np.hstack([grid_a, grid_b]))

    print("------------------")
    print("Pool.imap_batches(batches), maxtasksperchild=4, seed=1")
    print("------------------")
    with multicore.Pool(augseq, maxtasksperchild=4, seed=1) as pool:
        time_start = time.time()
        batches_aug = pool.imap_batches(load_images(n_batches=100))
        images_aug = []
        keypoints_aug = []
        for batch in batches_aug:
            images_aug.append(batch.images_aug)
            keypoints_aug.append(batch.keypoints_aug)
        print("Done in %.4fs" % (time.time() - time_start, ))
    ia.imshow(draw_grid(images_aug, keypoints_aug))

    for augseq_i in [augseq, augseq_slow]:
        print("------------------")
        print("Many very small runs (batches=1)")
        print("------------------")
        with multicore.Pool(augseq_i) as pool:
            time_start = time.time()
            for i in range(100):
                _ = pool.map_batches(list(load_images(n_batches=1)))
            print("Done in %.4fs" % (time.time() - time_start, ))

        print("------------------")
        print("Many very small runs (batches=2)")
        print("------------------")
        with multicore.Pool(augseq_i) as pool:
            time_start = time.time()
            for i in range(100):
                _ = pool.map_batches(list(load_images(n_batches=2)))
            print("Done in %.4fs" % (time.time() - time_start, ))
Example #28
                images[i] = [(images[i][..., c] - self.mean[c]) / self.std[c]
                             for c in range(self.n_chans)]

            images[i] = np.moveaxis(np.array(images[i]), 0, -1)
            images[i] = images[i].astype(float)
        return images

    def _augment_keypoints(self, keypoints_on_images, random_state, parents,
                           hooks):
        return keypoints_on_images

    def _augment_heatmaps(self, heatmaps, random_state, parents, hooks):
        return heatmaps

    def get_parameters(self):
        return [self.mean, self.std]


def rescale_images(images, random_state, parents, hooks):

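    # scale uint8 images to floats in [0, 1]; other dtypes are returned unchanged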
    result = []
    for image in images:
        image_aug = np.copy(image)
        if (image.dtype == np.uint8):
            image_aug = image_aug / 255
        result.append(image_aug)
    return result


rescale_augmenter = iaa.Lambda(func_images=rescale_images)
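
# a minimal usage sketch, assuming `images` is a batch of uint8 arrays (hypothetical name):
#   seq = iaa.Sequential([iaa.Fliplr(0.5), rescale_augmenter])
#   images_aug = seq.augment_images(images)  # uint8 inputs come back as floats in [0, 1]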
Example #29
model = Sequential([
    siamese_nets.get_layer('branch_model'),
    GramMatrix(kernel=siamese_nets.get_layer('head_model')),
])

#%% Init training
preprocessing = iaa.Sequential([
    iaa.Fliplr(0.5),
    iaa.Flipud(0.5),
    iaa.Affine(rotate=(-180, 180)),
    iaa.CropToFixedSize(224, 224, position='center'),
    iaa.PadToFixedSize(224, 224, position='center'),
    iaa.AssertShape((None, 224, 224, 3)),
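    # apply the branch model's Keras preprocess_input to the whole stacked batch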
    iaa.Lambda(lambda images_list, *_: (getattr(
        keras_applications, branch_model_name.lower()).preprocess_input(
            np.stack(images_list), data_format='channels_last'))),
])
batch_size = 64
callbacks = [
    TensorBoard(output_folder, write_images=True, histogram_freq=1),
    ModelCheckpoint(
        str(output_folder / 'kernel_loss_best_loss_weights.h5'),
        save_best_only=True,
        save_weights_only=True,
    ),
    ModelCheckpoint(
        str(output_folder / 'kernel_loss_best_accuracy_weights.h5'),
        save_best_only=True,
        save_weights_only=True,
        monitor='val_accuracy',
Example #30
def rescale_images(images, random_state, parents, hooks):

    result = []
    for image in images:
        image_aug = np.copy(image)
        if (image.dtype == np.uint8):
            image_aug = image_aug / 255
        result.append(image_aug)
    return result


void_fun = lambda x, random_state, parents, hooks: x

rescale_augmenter = iaa.Lambda(
    func_images=rescale_images,
    func_heatmaps=void_fun,
    func_keypoints=void_fun)


def train(cfg):

    # make run_dir with date
    d = datetime.datetime.now()
    run_dir = pjoin(cfg.out_dir, 'exp_{:%Y-%m-%d_%H-%M}'.format(d))

    if(not os.path.exists(run_dir)):
        os.makedirs(run_dir)

    with open(pjoin(run_dir, 'cfg.yml'), 'w') as outfile:
        yaml.dump(cfg.__dict__, stream=outfile, default_flow_style=False)