Code example #1
def make(p=0.5):
    return Compose(
        [
            OneOf([IAAAdditiveGaussianNoise(),
                   GaussNoise(),
                   ISONoise()],
                  p=0.9),
            MotionBlur(p=0.3),
            ShiftScaleRotate(shift_limit=0.0925,
                             scale_limit=0.4,
                             rotate_limit=7,
                             border_mode=cv2.BORDER_CONSTANT,
                             value=0,
                             p=0.6),
            # IAAPerspective(scale=(.055, .060), keep_size=False, p=.2),
            # OpticalDistortion(p=0.2),
            OneOf([
                CLAHE(clip_limit=2),
                IAASharpen(),
                IAAEmboss(),
                RandomBrightnessContrast(),
            ],
                  p=0.3),
            HueSaturationValue(p=0.3),
            RGBShift(40, 40, 40)
        ],
        p=p)
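For context, a minimal usage sketch (variable and file names here are illustrative, not from the original project): the Compose object returned by make() is called with the image as a keyword argument and yields a dict whose 'image' entry is the augmented array.

import cv2

aug = make(p=0.8)
img = cv2.imread("sample.jpg")           # uint8 HxWx3 BGR array
augmented = aug(image=img)["image"]      # albumentations pipelines return a dict keyed by target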
Code example #2
    def __init__(self, is_train: bool, to_pytorch: bool, preprocess):
        if is_train:
            self._aug = Compose([
                preprocess,
                OneOf([
                    Compose([
                        HorizontalFlip(p=0.5),
                        GaussNoise(p=0.5),
                        OneOf([
                            RandomBrightnessContrast(),
                            RandomGamma(),
                        ],
                              p=0.5),
                        Rotate(limit=20, border_mode=cv2.BORDER_CONSTANT),
                        ImageCompression(),
                        CLAHE(),
                        Downscale(scale_min=0.2, scale_max=0.9, p=0.5),
                        ISONoise(p=0.5),
                        MotionBlur(p=0.5)
                    ]),
                    HorizontalFlip(p=0.5)
                ])
            ],
                                p=1)
        else:
            self._aug = preprocess

        self._need_to_pytorch = to_pytorch
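The rest of this wrapper class is not shown. A hypothetical __call__ (an assumption, not part of the original snippet) illustrating how the stored pipeline and the to_pytorch flag are typically used:

    def __call__(self, image):
        # Hypothetical continuation: apply the stored pipeline, then optionally
        # convert the HWC uint8 array to a CHW float tensor for PyTorch.
        image = self._aug(image=image)["image"]
        if self._need_to_pytorch:
            import torch
            image = torch.from_numpy(image.transpose(2, 0, 1)).float()
        return image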
Code example #3
File: oct_augmentations.py  Project: theislab/LODE
def get_augmentations(params):
    augmentations = {
        "light": Compose([
            HorizontalFlip(p=0.5),
            Rotate(p=0.5)]),

        "medium": Compose([
            OneOf([RandomSizedCrop(min_max_height=(params.img_shape - 40, params.img_shape - 40),
                                   height=params.img_shape,
                                   width=params.img_shape,
                                   p=0.1),
                   PadIfNeeded(min_height=params.img_shape, min_width=params.img_shape, p=1)], p=1),

            VerticalFlip(p=0.2),
            RandomRotate90(p=0.2),
            Rotate(p=0.5),
            RandomBrightnessContrast(brightness_limit=0.6, contrast_limit=0.6, p=0.1),
            GaussNoise(p=0.1, var_limit=(10.0, 25.0)),
            ISONoise(color_shift=(0.01, 0.5), intensity=(0.1, 0.9), p=0.1),
            RandomGamma(gamma_limit=(50, 150), p=0.1)],
        )}
    return augmentations
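A usage sketch (the params object and file names are illustrative; only params.img_shape is read above). Because these are albumentations pipelines, an image and its segmentation mask can be passed together so the geometric transforms stay in sync:

import cv2
from types import SimpleNamespace

params = SimpleNamespace(img_shape=512)
augs = get_augmentations(params)
image = cv2.imread("oct_scan.png")
mask = cv2.imread("oct_scan_mask.png", cv2.IMREAD_GRAYSCALE)
out = augs["medium"](image=image, mask=mask)
image_aug, mask_aug = out["image"], out["mask"]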
Code example #4
def strong_aug(image, p=0.5):
    image2 = Compose(
        [  # add noise
            OneOf([
                IAAAdditiveGaussianNoise(),
                GaussNoise(),
                ISONoise(),
            ],
                  p=0.2),
            OneOf(
                [  # blur
                    MotionBlur(p=0.1),
                    MedianBlur(blur_limit=3, p=0.1),
                    Blur(blur_limit=3, p=0.1),
                    JpegCompression(p=.1),
                ],
                p=0.2),
            OneOf(
                [  # sharpen
                    CLAHE(clip_limit=2),
                    IAASharpen(),
                    IAAEmboss(),
                    RandomBrightnessContrast(),
                ],
                p=0.3),
            OneOf(
                [  # histogram equalization, contrast, hue/saturation shift, PCA
                    HueSaturationValue(),
                    RandomBrightnessContrast(),
                    Equalize(),
                    # FancyPCA(),
                ],
                p=0.3),
            ToGray(p=0.1),
        ],
        p=p)(image=image)['image']
    return image2
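Note that, unlike make() in example #1, strong_aug builds the pipeline and applies it in one call, returning the augmented image directly. A one-line usage sketch (file name is illustrative):

out = strong_aug(cv2.imread("sample.jpg"), p=0.9)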
Code example #5
)

transformimg = transforms.Compose(
    [
        transforms.ToTensor(),
        transforms.Normalize(
            mean=[0.485, 0.456, 0.406][::-1], std=[0.225, 0.224, 0.225][::-1]
        ),
    ]
)

transformaug = Compose(
    [
        VerticalFlip(p=0.5),
        RandomRotate90(p=0.5),
        ISONoise(p=0.5),
        RandomBrightnessContrast(p=0.5),
        RandomGamma(p=0.5),
        RandomFog(fog_coef_lower=0.025, fog_coef_upper=0.1, p=0.5),
    ]
)
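A sketch of the intended order of use (illustrative path): the albumentations pipeline runs on the NumPy image first, and the torchvision pipeline then converts it to a normalized tensor. The [::-1] on mean/std above reverses the (approximately ImageNet) statistics into BGR order to match images loaded with OpenCV.

import cv2

img = cv2.imread("data/train/images1024/sample_pre_disaster.png")  # BGR uint8
img = transformaug(image=img)["image"]   # augment as a NumPy array
tensor = transformimg(img)               # ToTensor + BGR-ordered normalization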


class XViewDataset(Dataset):
    def __init__(
        self, size=None, aug=True, pattern="data/train/images1024/*pre_disaster*.png"
    ):
        self.name = "train"
        self.aug = aug
        self.pre = glob(pattern)
        if size:
Code example #6
def transform(image, mask, image_name, mask_name):

    x, y = image, mask

    rand = random.uniform(0, 1)
    if (rand > 0.5):

        images_name = [f"{image_name}"]
        masks_name = [f"{mask_name}"]
        images_aug = [x]
        masks_aug = [y]

        it = iter(images_name)
        it2 = iter(images_aug)
        imagedict = dict(zip(it, it2))

        it = iter(masks_name)
        it2 = iter(masks_aug)
        masksdict = dict(zip(it, it2))

        return imagedict, masksdict

    mask_density = np.count_nonzero(y)

    # Collect augmented variants and their names below; initialise the
    # containers here so the final return is defined even when the mask is empty.
    images_aug, masks_aug = [], []
    imagedict, masksdict = {}, {}

    ## Augmenting only images with Gloms
    if mask_density > 0:
        try:
            h, w, c = x.shape
        except Exception as e:
            image = image[:-1]
            x, y = image, mask
            h, w, c = x.shape

        aug = Blur(p=1, blur_limit=3)
        augmented = aug(image=x, mask=y)
        x0 = augmented['image']
        y0 = augmented['mask']

        #    aug = CenterCrop(p=1, height=32, width=32)
        #    augmented = aug(image=x, mask=y)
        #    x1 = augmented['image']
        #    y1 = augmented['mask']

        ## Horizontal Flip
        aug = HorizontalFlip(p=1)
        augmented = aug(image=x, mask=y)
        x2 = augmented['image']
        y2 = augmented['mask']

        aug = VerticalFlip(p=1)
        augmented = aug(image=x, mask=y)
        x3 = augmented['image']
        y3 = augmented['mask']

        #      aug = Normalize(p=1)
        #      augmented = aug(image=x, mask=y)
        #      x4 = augmented['image']
        #      y4 = augmented['mask']

        aug = Transpose(p=1)
        augmented = aug(image=x, mask=y)
        x5 = augmented['image']
        y5 = augmented['mask']

        aug = RandomGamma(p=1)
        augmented = aug(image=x, mask=y)
        x6 = augmented['image']
        y6 = augmented['mask']

        ## Optical Distortion
        aug = OpticalDistortion(p=1, distort_limit=2, shift_limit=0.5)
        augmented = aug(image=x, mask=y)
        x7 = augmented['image']
        y7 = augmented['mask']

        ## Grid Distortion
        aug = GridDistortion(p=1)
        augmented = aug(image=x, mask=y)
        x8 = augmented['image']
        y8 = augmented['mask']

        aug = RandomGridShuffle(p=1)
        augmented = aug(image=x, mask=y)
        x9 = augmented['image']
        y9 = augmented['mask']

        aug = HueSaturationValue(p=1)
        augmented = aug(image=x, mask=y)
        x10 = augmented['image']
        y10 = augmented['mask']

        #        aug = PadIfNeeded(p=1)
        #        augmented = aug(image=x, mask=y)
        #        x11 = augmented['image']
        #        y11 = augmented['mask']

        aug = RGBShift(p=1)
        augmented = aug(image=x, mask=y)
        x12 = augmented['image']
        y12 = augmented['mask']

        ## Random Brightness
        aug = RandomBrightness(p=1)
        augmented = aug(image=x, mask=y)
        x13 = augmented['image']
        y13 = augmented['mask']

        ## Random  Contrast
        aug = RandomContrast(p=1)
        augmented = aug(image=x, mask=y)
        x14 = augmented['image']
        y14 = augmented['mask']

        #aug = MotionBlur(p=1)
        #augmented = aug(image=x, mask=y)
        #   x15 = augmented['image']
        #  y15 = augmented['mask']

        aug = MedianBlur(p=1, blur_limit=5)
        augmented = aug(image=x, mask=y)
        x16 = augmented['image']
        y16 = augmented['mask']

        aug = GaussianBlur(p=1, blur_limit=3)
        augmented = aug(image=x, mask=y)
        x17 = augmented['image']
        y17 = augmented['mask']

        aug = GaussNoise(p=1)
        augmented = aug(image=x, mask=y)
        x18 = augmented['image']
        y18 = augmented['mask']

        aug = GlassBlur(p=1)
        augmented = aug(image=x, mask=y)
        x19 = augmented['image']
        y19 = augmented['mask']

        aug = CLAHE(clip_limit=1.0,
                    tile_grid_size=(8, 8),
                    always_apply=False,
                    p=1)
        augmented = aug(image=x, mask=y)
        x20 = augmented['image']
        y20 = augmented['mask']

        aug = ChannelShuffle(p=1)
        augmented = aug(image=x, mask=y)
        x21 = augmented['image']
        y21 = augmented['mask']

        aug = ToGray(p=1)
        augmented = aug(image=x, mask=y)
        x22 = augmented['image']
        y22 = augmented['mask']

        aug = ToSepia(p=1)
        augmented = aug(image=x, mask=y)
        x23 = augmented['image']
        y23 = augmented['mask']

        aug = JpegCompression(p=1)
        augmented = aug(image=x, mask=y)
        x24 = augmented['image']
        y24 = augmented['mask']

        aug = ImageCompression(p=1)
        augmented = aug(image=x, mask=y)
        x25 = augmented['image']
        y25 = augmented['mask']

        aug = Cutout(p=1)
        augmented = aug(image=x, mask=y)
        x26 = augmented['image']
        y26 = augmented['mask']

        #       aug = CoarseDropout(p=1, max_holes=8, max_height=32, max_width=32)
        #       augmented = aug(image=x, mask=y)
        #       x27 = augmented['image']
        #       y27 = augmented['mask']

        #       aug = ToFloat(p=1)
        #       augmented = aug(image=x, mask=y)
        #       x28 = augmented['image']
        #       y28 = augmented['mask']

        aug = FromFloat(p=1)
        augmented = aug(image=x, mask=y)
        x29 = augmented['image']
        y29 = augmented['mask']

        ## Random Brightness and Contrast
        aug = RandomBrightnessContrast(p=1)
        augmented = aug(image=x, mask=y)
        x30 = augmented['image']
        y30 = augmented['mask']

        aug = RandomSnow(p=1)
        augmented = aug(image=x, mask=y)
        x31 = augmented['image']
        y31 = augmented['mask']

        aug = RandomRain(p=1)
        augmented = aug(image=x, mask=y)
        x32 = augmented['image']
        y32 = augmented['mask']

        aug = RandomFog(p=1)
        augmented = aug(image=x, mask=y)
        x33 = augmented['image']
        y33 = augmented['mask']

        aug = RandomSunFlare(p=1)
        augmented = aug(image=x, mask=y)
        x34 = augmented['image']
        y34 = augmented['mask']

        aug = RandomShadow(p=1)
        augmented = aug(image=x, mask=y)
        x35 = augmented['image']
        y35 = augmented['mask']

        aug = Lambda(p=1)
        augmented = aug(image=x, mask=y)
        x36 = augmented['image']
        y36 = augmented['mask']

        aug = ChannelDropout(p=1)
        augmented = aug(image=x, mask=y)
        x37 = augmented['image']
        y37 = augmented['mask']

        aug = ISONoise(p=1)
        augmented = aug(image=x, mask=y)
        x38 = augmented['image']
        y38 = augmented['mask']

        aug = Solarize(p=1)
        augmented = aug(image=x, mask=y)
        x39 = augmented['image']
        y39 = augmented['mask']

        aug = Equalize(p=1)
        augmented = aug(image=x, mask=y)
        x40 = augmented['image']
        y40 = augmented['mask']

        aug = Posterize(p=1)
        augmented = aug(image=x, mask=y)
        x41 = augmented['image']
        y41 = augmented['mask']

        aug = Downscale(p=1)
        augmented = aug(image=x, mask=y)
        x42 = augmented['image']
        y42 = augmented['mask']

        aug = MultiplicativeNoise(p=1)
        augmented = aug(image=x, mask=y)
        x43 = augmented['image']
        y43 = augmented['mask']

        aug = FancyPCA(p=1)
        augmented = aug(image=x, mask=y)
        x44 = augmented['image']
        y44 = augmented['mask']

        #       aug = MaskDropout(p=1)
        #       augmented = aug(image=x, mask=y)
        #       x45 = augmented['image']
        #       y45 = augmented['mask']

        aug = GridDropout(p=1)
        augmented = aug(image=x, mask=y)
        x46 = augmented['image']
        y46 = augmented['mask']

        aug = ColorJitter(p=1)
        augmented = aug(image=x, mask=y)
        x47 = augmented['image']
        y47 = augmented['mask']

        ## ElasticTransform
        aug = ElasticTransform(p=1,
                               alpha=120,
                               sigma=512 * 0.05,
                               alpha_affine=512 * 0.03)
        augmented = aug(image=x, mask=y)
        x50 = augmented['image']
        y50 = augmented['mask']

        aug = CropNonEmptyMaskIfExists(p=1, height=22, width=32)
        augmented = aug(image=x, mask=y)
        x51 = augmented['image']
        y51 = augmented['mask']

        aug = IAAAffine(p=1)
        augmented = aug(image=x, mask=y)
        x52 = augmented['image']
        y52 = augmented['mask']

        #        aug = IAACropAndPad(p=1)
        #        augmented = aug(image=x, mask=y)
        #        x53 = augmented['image']
        #        y53 = augmented['mask']

        aug = IAAFliplr(p=1)
        augmented = aug(image=x, mask=y)
        x54 = augmented['image']
        y54 = augmented['mask']

        aug = IAAFlipud(p=1)
        augmented = aug(image=x, mask=y)
        x55 = augmented['image']
        y55 = augmented['mask']

        aug = IAAPerspective(p=1)
        augmented = aug(image=x, mask=y)
        x56 = augmented['image']
        y56 = augmented['mask']

        aug = IAAPiecewiseAffine(p=1)
        augmented = aug(image=x, mask=y)
        x57 = augmented['image']
        y57 = augmented['mask']

        aug = LongestMaxSize(p=1)
        augmented = aug(image=x, mask=y)
        x58 = augmented['image']
        y58 = augmented['mask']

        aug = NoOp(p=1)
        augmented = aug(image=x, mask=y)
        x59 = augmented['image']
        y59 = augmented['mask']

        #       aug = RandomCrop(p=1, height=22, width=22)
        #       augmented = aug(image=x, mask=y)
        #       x61 = augmented['image']
        #       y61 = augmented['mask']

        #      aug = RandomResizedCrop(p=1, height=22, width=20)
        #      augmented = aug(image=x, mask=y)
        #      x63 = augmented['image']
        #      y63 = augmented['mask']

        aug = RandomScale(p=1)
        augmented = aug(image=x, mask=y)
        x64 = augmented['image']
        y64 = augmented['mask']

        #      aug = RandomSizedCrop(p=1, height=22, width=20, min_max_height = [32,32])
        #      augmented = aug(image=x, mask=y)
        #      x66 = augmented['image']
        #      y66 = augmented['mask']

        #      aug = Resize(p=1, height=22, width=20)
        #      augmented = aug(image=x, mask=y)
        #      x67 = augmented['image']
        #      y67 = augmented['mask']

        aug = Rotate(p=1)
        augmented = aug(image=x, mask=y)
        x68 = augmented['image']
        y68 = augmented['mask']

        aug = ShiftScaleRotate(p=1)
        augmented = aug(image=x, mask=y)
        x69 = augmented['image']
        y69 = augmented['mask']

        aug = SmallestMaxSize(p=1)
        augmented = aug(image=x, mask=y)
        x70 = augmented['image']
        y70 = augmented['mask']

        images_aug.extend([
            x, x0, x2, x3, x5, x6, x7, x8, x9, x10, x12, x13, x14, x16, x17,
            x18, x19, x20, x21, x22, x23, x24, x25, x26, x29, x30, x31, x32,
            x33, x34, x35, x36, x37, x38, x39, x40, x41, x42, x43, x44, x46,
            x47, x50, x51, x52, x54, x55, x56, x57, x58, x59, x64, x68, x69,
            x70
        ])

        masks_aug.extend([
            y, y0, y2, y3, y5, y6, y7, y8, y9, y10, y12, y13, y14, y16, y17,
            y18, y19, y20, y21, y22, y23, y24, y25, y26, y29, y30, y31, y32,
            y33, y34, y35, y36, y37, y38, y39, y40, y41, y42, y43, y44, y46,
            y47, y50, y51, y52, y54, y55, y56, y57, y58, y59, y64, y68, y69,
            y70
        ])

        idx = -1
        images_name = []
        masks_name = []
        # smalllist is a module-level list of per-augmentation name suffixes
        # defined elsewhere in the original project.
        for i, m in zip(images_aug, masks_aug):
            if idx == -1:
                tmp_image_name = f"{image_name}"
                tmp_mask_name = f"{mask_name}"
            else:
                tmp_image_name = f"{image_name}_{smalllist[idx]}"
                tmp_mask_name = f"{mask_name}_{smalllist[idx]}"
            images_name.append(tmp_image_name)
            masks_name.append(tmp_mask_name)
            idx += 1

        it = iter(images_name)
        it2 = iter(images_aug)
        imagedict = dict(zip(it, it2))

        it = iter(masks_name)
        it2 = iter(masks_aug)
        masksdict = dict(zip(it, it2))

    return imagedict, masksdict
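A usage sketch for the function above (file names are illustrative). The returned dictionaries map generated names to arrays, with the augmented-variant names built from the module-level smalllist suffixes:

import cv2

image = cv2.imread("glom_001.png")
mask = cv2.imread("glom_001_mask.png", cv2.IMREAD_GRAYSCALE)
imagedict, masksdict = transform(image, mask, "glom_001", "glom_001_mask")
for name, img in imagedict.items():
    cv2.imwrite(f"out/{name}.png", img)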
Code example #7
    def __getitem__(self, index):
        fn, label = self.imgs[index]
        img = cv2.imread(
            fn)  # BGR format, pixel values 0~255; transforms.ToTensor later divides by 255, mapping them to 0~1
        #print(fn)
        img_copy = copy.deepcopy(img)
        # perform the replacement
        #   print(fn)
        #im = cv2.imread('baseline/MM.png')
        #print(img)
        # round face, triangular face, lower half of the face,
        OVERLAY_POINTS_list = [
            LEFT_BROW_POINTS + RIGHT_BROW_POINTS + JAW_POINTS,
            LEFT_BROW_POINTS + RIGHT_BROW_POINTS +
            [48, 59, 58, 57, 56, 55, 54], NOSE_POINTS + list(range(1, 16)),
            [29] + list(range(2, 15)), MOUTH_POINTS + list(range(4, 13))
        ]

        fn_p = fn.split('/')[-1]

        point = np.array(self.dict[fn_p])
        # choose a mask

        # with open('found_video.json', 'r') as f:
        #    self.video_record = json.load(f)

        mask_index = random.choice([0, 1, 2, 3, 4, 5])
        if mask_index != 5:
            OVERLAY_POINTS = OVERLAY_POINTS_list[mask_index]

            mask = get_face_mask_more(img.shape[0:2], point, OVERLAY_POINTS)
        elif mask_index == 5:
            mask = cut_head([img], point)
        '''
        OVERLAY_POINTS = OVERLAY_POINTS_list[0]
        mask = get_face_mask_more(img.shape[0:2], point, OVERLAY_POINTS)
        '''
        #c = random.choice([0,1,2,3,4,    5,6,7,8,9,10,11,12,13,14,15])

        blend_method = random.choice([0, 2])

        #print(fn_p)

        if random.choice([0, 1]):
            new = img
            label = 0

        else:
            mask = mask / 255
            mask = mask.astype(np.float32)
            mask = self.distortion.augment_image(mask)
            mask = random_erode_dilate(mask)
            if np.sum(mask) == 0 or np.sum(1 - mask) == 0:
                raise NotImplementedError

            if self.augMethod == 1:  #f1
                # forward Gaussian smoothing
                label = 1
                #print('gsdata')
                siz = random.choice([7, 9, 11])
                new = cv2.GaussianBlur(img_copy, (siz, siz), 0)
                new = blend(img_copy, new, mask, blend_method)

            elif self.augMethod == 2:  #f2
                label = 1
                #print('De_gsdata')
                siz = random.choice([7, 9, 11])
                new = cv2.GaussianBlur(img_copy, (siz, siz), 0)
                new = deblend(img_copy, new, mask, blend_method)

            elif self.augMethod == 3:  #f1
                # forward scaling (downscale then upscale)
                label = 1

                siz = random.randint(64, 128)
                H, W = img.shape[0:2]
                resized = cv2.resize(img_copy, (int(siz), int(siz)))
                new = cv2.resize(resized, (W, H), interpolation=cv2.INTER_NEAREST)

                new = blend(img_copy, new, mask, blend_method)

            elif self.augMethod == 4:  #f2
                label = 1
                siz = random.randint(64, 128)
                H, W = img.shape[0:2]
                resized = cv2.resize(img_copy, (int(siz), int(siz)))
                new = cv2.resize(resized, (W, H), interpolation=cv2.INTER_NEAREST)
                new = deblend(img_copy, new, mask, blend_method)

            elif self.augMethod == 5:  #f1
                # forward ISO noise
                label = 1

                aug = ISONoise(color_shift=(0.08, 0.15),
                               intensity=(0.1, 0.2),
                               p=1)
                new = aug(image=img_copy)['image']

                new = blend(img_copy, new, mask, blend_method)

            elif self.augMethod == 6:  #f2
                label = 1
                aug = ISONoise(color_shift=(0.08, 0.15),
                               intensity=(0.1, 0.2),
                               p=1)
                new = aug(image=img_copy)['image']

                new = deblend(img_copy, new, mask, blend_method)

            elif self.augMethod == 7:  #f1
                # forward small-angle rotation
                label = 1
                siz = random.uniform(3, 4)
                if random.choice([0, 1]):
                    siz = -siz

                aug = ShiftScaleRotate(shift_limit=0,
                                       scale_limit=0,
                                       rotate_limit=(siz, siz),
                                       p=1)
                new = aug(image=img_copy)['image']

                new = blend(img_copy, new, mask, blend_method)

            elif self.augMethod == 8:  #f2

                f1, label = self.imgs[index]
                f2 = str(random.sample(self.dict.keys(), 1)[0])
                f1_read = f1
                label = 1
                im1 = cv2.imread(f1_read)
                landmarks1 = np.array(self.dict[f1.split('/')[-1]])

                landmarks2 = np.array(self.dict[f2])
                f2 = 'train/resize_image/' + f2
                im2 = cv2.imread(f2)

                landmarks1 = np.matrix([[p[0], p[1]] for p in landmarks1])
                landmarks2 = np.matrix([[p[0], p[1]] for p in landmarks2])

                M = transformation_from_points(landmarks1[ALIGN_POINTS],
                                               landmarks2[ALIGN_POINTS])

                mask_index = random.choice([0, 1, 2, 3, 4])
                OVERLAY_POINTS = OVERLAY_POINTS_list[mask_index]

                mask_swap = get_face_mask(im2, landmarks2, OVERLAY_POINTS)

                warped_mask = warp_im(mask_swap, M, im1.shape)
                mask = np.max([
                    get_face_mask(im1, landmarks1, OVERLAY_POINTS), warped_mask
                ],
                              axis=0)

                warped_im2 = warp_im(im2, M, im1.shape)

                warped_corrected_im2 = correct_colours(im1, warped_im2,
                                                       landmarks1)
                warped_corrected_im2 = np.around(warped_corrected_im2)
                warped_corrected_im2[warped_corrected_im2 > 255] = 255
                warped_corrected_im2[
                    warped_corrected_im2 < 0] = 0  # saturation
                warped_corrected_im2 = warped_corrected_im2.astype(np.uint8)

                new = blend(im1, warped_corrected_im2, mask, blend_method)

            elif self.augMethod == 0:  # "F" data: pick one of the above methods at random
                label = 1
                #print('alldata')
                t = random.choice([0, 1, 2, 3, 4])
                if t == 0:  #gs,degs
                    siz = random.choice([7, 9, 11])
                    new = cv2.GaussianBlur(img_copy, (siz, siz), 0)
                    if random.choice([0, 1]):
                        new = blend(img_copy, new, mask, blend_method)
                    else:
                        new = deblend(img_copy, new, mask, blend_method)
                elif t == 1:
                    siz = random.randint(64, 128)
                    H, W = img.shape[0:2]
                    resized = cv2.resize(img_copy, (int(siz), int(siz)))
                    new = cv2.resize(resized, (W, H), interpolation=cv2.INTER_NEAREST)
                    if random.choice([0, 1]):
                        new = blend(img_copy, new, mask, blend_method)
                    else:
                        new = deblend(img_copy, new, mask, blend_method)
                elif t == 2:
                    aug = ISONoise(color_shift=(0.08, 0.15),
                                   intensity=(0.1, 0.2),
                                   p=1)
                    new = aug(image=img_copy)['image']
                    if random.choice([0, 1]):
                        new = blend(img_copy, new, mask, blend_method)
                    else:
                        new = deblend(img_copy, new, mask, blend_method)

                elif t == 3:
                    siz = random.uniform(3, 4)
                    if random.choice([0, 1]):
                        siz = -siz
                    aug = ShiftScaleRotate(shift_limit=0,
                                           scale_limit=0,
                                           rotate_limit=(siz, siz),
                                           p=1)
                    new = aug(image=img_copy)['image']
                    new = blend(img_copy, new, mask, blend_method)

                elif t == 4:
                    f1, label = self.imgs[index]
                    f2 = str(random.sample(self.dict.keys(), 1)[0])
                    f1_read = f1
                    label = 1
                    im1 = cv2.imread(f1_read)
                    landmarks1 = np.array(self.dict[f1.split('/')[-1]])

                    landmarks2 = np.array(self.dict[f2])
                    f2 = 'train/resize_image/' + f2
                    im2 = cv2.imread(f2)

                    landmarks1 = np.matrix([[p[0], p[1]] for p in landmarks1])
                    landmarks2 = np.matrix([[p[0], p[1]] for p in landmarks2])

                    M = transformation_from_points(landmarks1[ALIGN_POINTS],
                                                   landmarks2[ALIGN_POINTS])

                    mask_index = random.choice([0, 1, 2, 3, 4])
                    OVERLAY_POINTS = OVERLAY_POINTS_list[mask_index]

                    mask_swap = get_face_mask(im2, landmarks2, OVERLAY_POINTS)

                    warped_mask = warp_im(mask_swap, M, im1.shape)
                    mask = np.max([
                        get_face_mask(im1, landmarks1, OVERLAY_POINTS),
                        warped_mask
                    ],
                                  axis=0)

                    warped_im2 = warp_im(im2, M, im1.shape)

                    warped_corrected_im2 = correct_colours(
                        im1, warped_im2, landmarks1)
                    warped_corrected_im2 = np.around(warped_corrected_im2)
                    warped_corrected_im2[warped_corrected_im2 > 255] = 255
                    warped_corrected_im2[
                        warped_corrected_im2 < 0] = 0  # saturation
                    warped_corrected_im2 = warped_corrected_im2.astype(
                        np.uint8)

                    new = blend(im1, warped_corrected_im2, mask, blend_method)

        new = new.astype(np.uint8)

        new = my_aug(new)

        lin = Image.fromarray(cv2.cvtColor(new, cv2.COLOR_BGR2RGB))
        if self.transform is not None:
            new = self.transform(lin)  # apply the transform here (convert to a tensor, etc.)

        return new, label
Code example #8
def train(file_pattern,
          train_num_batches=None,
          train_aug=False,
          train_batch_size=1,
          val_batch_size=1,
          learning_rate=1e-3,
          epochs=1,
          verbosity=2,
          file_directory=None,
          resume=None,
          train_shuffle=True,
          pre_image_mean=None,
          post_image_mean=None):
    """
    Function to train the UNet model
    Parameters
    ----------
    file_pattern : string
        Location where the image folder is for the data. Example format:
        "images/*pre_disaster*.png"
    train_num_batches : int
        Number of batches for the training set, if none, the full dataset will
        be used.
    train_aug : bool
        If true, augmentations are performed.
    train_batch_size : int, default 1
        Batch size for the training set.
    val_batch_size : int, default 1
        Batch size for the validation set.
    learning_rate : float, default 0.001
        Learning rate for the UNet.
    epochs : int, default 1
        How many epochs for the training to run.
    verbosity : int, default 2
        How verbose you'd like the output to be.
    file_directory : string, default None
        Directory where you'd like the output files saved.
    resume : string, default None
        Enter in a string for the saved model file and training will resume
        from this instance.
    train_shuffle : bool
        If True, the training data is shuffled for each epoch.
    pre_image_mean : str
        The filepath for the pre image mean numpy array file.
    post_image_mean : str
        The filepath for the post image mean numpy array file.
    Returns
    -------
    Saves the model weights, csv logs, and tensorboard files in the original
    directories specified.

    """
    if file_directory is None:
        file_directory = os.path.abspath(
            os.path.join(os.getcwd(), "saved_models"))

    tensorboard_path = os.path.join(
        file_directory, "logs",
        "tboard_{}".format(datetime.datetime.now().strftime("%Y%m%d-%H%M")))
    weights_path = os.path.join(
        file_directory, "unet_weights_{}".format(
            datetime.datetime.now().strftime("%Y%m%d-%H%M")))
    csv_logger_path = os.path.join(
        file_directory,
        "log_unet_{}{}".format(datetime.datetime.now().strftime("%Y%m%d-%H%M"),
                               ".csv"))

    if train_aug:
        train_augs = Compose([
            VerticalFlip(p=0.5),
            RandomRotate90(p=0.5),
            ISONoise(p=0.5),
            RandomBrightnessContrast(p=0.5),
            RandomGamma(p=0.5),
            RandomFog(fog_coef_lower=0.025, fog_coef_upper=0.1, p=0.5),
        ])

    else:
        train_augs = None

    # Weighted categorical cross entropy weights
    # class_weights = tf.constant([0.1, 1.0, 2.0, 2.0, 2.0])
    # class_weights = tf.constant([1.0, 1.0, 0.5, 0.5, 0.5])
    class_weights = tf.constant([1.0, 1.0, 3.0, 3.0, 3.0])

    train_data = LabeledImageDataset(num_batches=train_num_batches,
                                     augmentations=train_augs,
                                     pattern=file_pattern,
                                     shuffle=train_shuffle,
                                     n_classes=5,
                                     batch_size=train_batch_size,
                                     normalize=True)

    # Using random samples from train for validation
    val_data = LabeledImageDataset(num_batches=100,
                                   augmentations=train_augs,
                                   pattern=file_pattern,
                                   shuffle=train_shuffle,
                                   n_classes=5,
                                   batch_size=val_batch_size,
                                   normalize=True)
    if resume:
        try:
            print("the pretrained model was loaded")
            model = UNet(num_classes=5).model((None, None, 3))
            model.load_weights(resume)
        except OSError:
            print("The model file could not be found. "
                  "Starting from a new model instance")
            model = UNet(num_classes=5).model((None, None, 3))
    else:
        model = UNet(num_classes=5).model((None, None, 3))

    metrics = [tf.keras.metrics.CategoricalAccuracy()]
    for i in range(5):
        metrics.append(Precision(class_id=i, name=f"prec_class_{i}"))
        metrics.append(Recall(class_id=i, name=f"rec_class_{i}"))

    model.compile(optimizer=keras.optimizers.RMSprop(lr=learning_rate),
                  loss=CombinedLoss(class_weights),
                  metrics=metrics)

    # Creating a checkpoint to save the model after every epoch if the
    # validation loss has decreased
    model_checkpoint = ModelCheckpoint("dual_unet_{epoch:02d}-{loss:.2f}.hdf5",
                                       monitor='loss',
                                       save_best_only=False,
                                       mode='min',
                                       save_weights_only=True,
                                       verbose=verbosity)

    csv_logger = CSVLogger(csv_logger_path, append=True, separator=',')

    lr_logger = ReduceLROnPlateau(monitor='loss',
                                  factor=0.2,
                                  patience=1,
                                  verbose=verbosity,
                                  mode='min',
                                  min_lr=1e-6)

    tensorboard_cb = TensorBoard(log_dir=tensorboard_path, write_images=True)

    try:
        model.fit(train_data,
                  epochs=epochs,
                  verbose=verbosity,
                  callbacks=[
                      LossAndErrorPrintingCallback(), model_checkpoint,
                      csv_logger, lr_logger, tensorboard_cb
                  ],
                  validation_data=val_data,
                  workers=6)

    except KeyboardInterrupt:
        save_model(model, pause=1)
        sys.exit()
    except Exception as exc:
        save_model(model, pause=0)
        raise exc
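An example invocation (paths are placeholders; the glob pattern follows the format described in the docstring above):

train("images/*pre_disaster*.png",
      train_aug=True,
      train_batch_size=4,
      epochs=10,
      file_directory="saved_models")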
Code example #9
                       val_shift_limit=20,
                       p=0.2),
    RGBShift(r_shift_limit=20, g_shift_limit=20, b_shift_limit=20, p=0.15),
    RandomBrightnessContrast(p=0.2),
    MotionBlur(blur_limit=7, p=0.2),
    GaussianBlur(blur_limit=7, p=0.15),
    CLAHE(p=0.05),
    ChannelShuffle(p=0.05),
    ToGray(p=0.1),
    ImageCompression(quality_lower=10, quality_upper=100, p=0.15),
    CoarseDropout(max_holes=32, max_height=12, max_width=12, p=0.05),
    Downscale(p=0.3),
    FancyPCA(alpha=0.4, p=0.1),
    Posterize(num_bits=4, p=0.03),
    Equalize(p=0.05),
    ISONoise(color_shift=(0.1, 0.5), p=0.07),
    RandomFog(p=0.03)
]

BACKGROUNDS_PATHS = glob(BACKGROUNDS_WILDRCARD)
BACKGROUNDS = [
    load_image(path, cv.COLOR_BGR2RGB) for path in BACKGROUNDS_PATHS
]

ENTRY_TRANSFORMATION = EntryTransformation(class_mapping=CLASS_MAPPINGS,
                                           target_size=MODEL_INPUT_SIZE,
                                           backgrounds=BACKGROUNDS)

DATA_AUGMENTATIONS = [
    DataAugmentation(transformations=AUGMENTATIONS,
                     global_application_probab=0.6),
Code example #10
File: center-resnet18.py  Project: iofthetiger/pkuad
# In[11]:

albu_list = [
    RandomBrightnessContrast(brightness_limit=(-0.3, 0.3),
                             contrast_limit=(-0.3, 0.3),
                             p=0.3),
    RandomGamma(p=0.2),
    HueSaturationValue(p=0.3),
    RGBShift(p=0.3),
    MotionBlur(p=0.1),
    Blur(p=0.1),
    GaussNoise(var_limit=(20, 100), p=0.2),
    ChannelShuffle(p=0.2),
    MultiplicativeNoise(multiplier=(0.7, 1.2), p=0.2),
    ISONoise(p=0.2),
    GaussNoise(var_limit=(10.0, 50.0), mean=0, always_apply=False, p=0.5)
]

# NOT in colab version: MultiplicativeNoise(multiplier=(0.7, 1.2), p=0.2), ISONoise(p=0.2),
# GaussNoise(var_limit=(10.0, 50.0), mean=0, always_apply=False, p=0.5)

p_transform_train = 0.1
albu_transform_train = Compose(albu_list, p=p_transform_train)

p_transform_val = 0.05
albu_transform_valid = Compose(albu_list, p=p_transform_val)

# # PyTorch Dataset

# In[12]:
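A usage sketch (image loading is illustrative): the outer Compose probability gates the whole list, so with p_transform_train = 0.1 the augmentation list is applied to roughly 10% of training samples, and each inner transform then fires according to its own p.

import cv2

img = cv2.imread("sample.jpg")
img_aug = albu_transform_train(image=img)["image"]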
Code example #11
        transform = iaa.Convolve(matrix=matrix)
        transformed_image = transform(image=image)

    ## Corruption

    elif augmentation == 'gauss_noise':
        transform = GaussNoise(always_apply=True, var_limit=(200.0, 250.0))
        transformed_image = transform(image=image)['image']

    elif augmentation == 'multiplicative_noise':
        transform = MultiplicativeNoise(always_apply=True, 
                                        multiplier=(0.5, 1.5))
        transformed_image = transform(image=image)['image']

    elif augmentation == 'iso_noise':
        transform = ISONoise(always_apply=True, color_shift=(0.08, 0.1), 
                                                intensity=(0.5, 0.8))
        transformed_image = transform(image=image)['image']

    elif augmentation == 'shot_noise':
        transform = iaa.imgcorruptlike.ShotNoise(severity=2)
        transformed_image = transform(image=image)
    
    elif augmentation == 'speckle_noise':
        transform = iaa.imgcorruptlike.SpeckleNoise(severity=2)
        transformed_image = transform(image=image)
    
    elif augmentation == 'random_shadow':
        transform = RandomShadow(always_apply=True)
        transformed_image = transform(image=image)['image']

    elif augmentation == 'random_sun_flare':