예제 #1
0
    def get_data_transforms(self) -> Tuple[BasicTransform, BasicTransform]:
        """Get albumentations transform objects for data augmentation.

        Returns:
           1st tuple arg: a transform that doesn't do any data augmentation
           2nd tuple arg: a transform with data augmentation
        """
        cfg = self.cfg
        bbox_params = self.get_bbox_params()
        # Plain pipeline: resize to the configured square size, no augmentation.
        transform = Compose([Resize(cfg.data.img_sz, cfg.data.img_sz)],
                            bbox_params=bbox_params)

        # Registry of augmentors selectable by name via cfg.data.augmentors.
        augmentors_dict = {
            'Blur': Blur(),
            'RandomRotate90': RandomRotate90(),
            'HorizontalFlip': HorizontalFlip(),
            'VerticalFlip': VerticalFlip(),
            'GaussianBlur': GaussianBlur(),
            'GaussNoise': GaussNoise(),
            'RGBShift': RGBShift(),
            'ToGray': ToGray()
        }
        aug_transforms = []
        for augmentor in cfg.data.augmentors:
            if augmentor in augmentors_dict:
                aug_transforms.append(augmentors_dict[augmentor])
            else:
                # BUG FIX: the original backslash continuation inside the
                # string literal embedded a run of indentation spaces into
                # the logged message. Implicit concatenation keeps the
                # message on one clean line.
                log.warning(
                    '{0!r} is an unknown augmentor. Continuing without '
                    '{0!r}. Known augmentors are: {1}'.format(
                        augmentor, list(augmentors_dict.keys())))
        # The augmented pipeline always ends with the same resize.
        aug_transforms.append(Resize(cfg.data.img_sz, cfg.data.img_sz))
        aug_transform = Compose(aug_transforms, bbox_params=bbox_params)

        return transform, aug_transform
예제 #2
0
    def get_data_transforms(self):
        """Build the (plain, augmented) pair of albumentations transforms.

        Returns:
            A 2-tuple ``(transform, aug_transform)``: the first only resizes
            to ``cfg.data.img_sz``; the second applies the augmentors named
            in ``cfg.data.augmentors`` followed by the same resize. Both
            carry the bbox params from ``self.get_bbox_params()``.
        """
        cfg = self.cfg
        bbox_params = self.get_bbox_params()
        # Plain pipeline: resize only, no augmentation.
        transform = Compose([Resize(cfg.data.img_sz, cfg.data.img_sz)],
                            bbox_params=bbox_params)

        # Registry of augmentors selectable by name via cfg.data.augmentors.
        augmentors_dict = {
            'Blur': Blur(),
            'RandomRotate90': RandomRotate90(),
            'HorizontalFlip': HorizontalFlip(),
            'VerticalFlip': VerticalFlip(),
            'GaussianBlur': GaussianBlur(),
            'GaussNoise': GaussNoise(),
            'RGBShift': RGBShift(),
            'ToGray': ToGray()
        }
        aug_transforms = []
        for augmentor in cfg.data.augmentors:
            try:
                aug_transforms.append(augmentors_dict[augmentor])
            except KeyError as e:
                # NOTE(review): the backslash continuation embeds the next
                # line's indentation into the logged message text.
                log.warning(
                    '{0} is an unknown augmentor. Continuing without {0}. \
                    Known augmentors are: {1}'.format(
                        e, list(augmentors_dict.keys())))
        # The augmented pipeline always ends with the same resize.
        aug_transforms.append(Resize(cfg.data.img_sz, cfg.data.img_sz))
        aug_transform = Compose(aug_transforms, bbox_params=bbox_params)

        return transform, aug_transform
예제 #3
0
def rand_aug():
    """Build a random-augmentation pipeline ending with normalization.

    BUG FIX: the original had ``Normalize(...)`` dangling *outside* the
    ``return Compose([...]),`` expression (with a stray trailing comma),
    which is a syntax/indentation error — the module would not parse.
    Normalize is now the final step inside the single Compose.
    """
    # NOTE(review): albumentations' RandomCrop normally requires explicit
    # height/width arguments — confirm the intended crop size.
    return Compose([
        RandomRotate90(p=0.2),
        GaussNoise(p=0.2),
        HorizontalFlip(p=0.2),
        RandomCrop(p=0.2),
        HueSaturationValue(p=0.2),
        RandomBrightness(p=0.2),
        RandomContrast(p=0.2),
        RandomGamma(p=0.2),
        GaussianBlur(p=0.2),
        Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225),
                  p=1.0),
    ])
예제 #4
0
def build_databunch(data_dir, img_sz, batch_sz, class_names, augmentors):
    """Assemble train/valid datasets and loaders into a DataBunch.

    Expects ``data_dir`` to hold ``train`` and ``valid`` subfolders laid
    out for ImageFolder. Augmentors are selected by name; unknown names
    are logged and skipped. Only the training set is augmented.
    """
    num_workers = 4

    train_dir = join(data_dir, 'train')
    valid_dir = join(data_dir, 'valid')

    # Name -> transform registry for the configurable augmentors.
    augmentors_dict = {
        'Blur': Blur(),
        'RandomRotate90': RandomRotate90(),
        'HorizontalFlip': HorizontalFlip(),
        'VerticalFlip': VerticalFlip(),
        'GaussianBlur': GaussianBlur(),
        'GaussNoise': GaussNoise(),
        'RGBShift': RGBShift(),
        'ToGray': ToGray()
    }

    chosen = []
    for name in augmentors:
        try:
            chosen.append(augmentors_dict[name])
        except KeyError as e:
            log.warning('{0} is an unknown augmentor. Continuing without {0}. \
                Known augmentors are: {1}'.format(e,
                                                  list(
                                                      augmentors_dict.keys())))
    aug_transforms = Compose(chosen)

    # Wrap the image folders; only the train split gets augmentation.
    train_source = ImageFolder(train_dir, classes=class_names)
    train_ds = AlbumentationDataset(train_source, transform=aug_transforms)
    valid_source = ImageFolder(valid_dir, classes=class_names)
    valid_ds = AlbumentationDataset(valid_source)

    train_dl = DataLoader(train_ds,
                          batch_size=batch_sz,
                          shuffle=True,
                          drop_last=True,
                          num_workers=num_workers,
                          pin_memory=True)
    valid_dl = DataLoader(valid_ds,
                          batch_size=batch_sz,
                          num_workers=num_workers,
                          pin_memory=True)

    return DataBunch(train_ds, train_dl, valid_ds, valid_dl, class_names)
예제 #5
0
 def __init__(self,
              df=None,
              size=150,
              mean=[0.485, 0.456, 0.406],
              std=[0.229, 0.224, 0.225],
              augment=True,
              frames=30,
              stochastic=True):
     """Dataset initialization.

     Parameters
     ----------
     df : pd.DataFrame
         Dataframe with preprocessed data; must contain an 'nframes'
         column. Rows with fewer than `frames` frames are dropped.
     size : int
         Side length for the torchvision square resize transform.
     mean : list of float
         Per-channel normalization means (ImageNet defaults).
     std : list of float
         Per-channel normalization stds (ImageNet defaults).
     augment : bool
         If True, apply train-time augmentations before normalization;
         otherwise the transform only normalizes.
     frames : int
         Frames to load per video.
     stochastic : bool
         Stored on the instance; presumably controls random frame
         sampling elsewhere — TODO confirm against __getitem__.
     """
     # NOTE(review): mean/std are mutable default arguments shared across
     # calls; safe only as long as they are never mutated.
     assert df is not None, 'Missing dataframe for data'
     self.frames = frames
     # Keep only videos that have at least `frames` frames available.
     self.df = df[df['nframes'] >= frames]
     self.stochastic = stochastic
     # additional_targets maps 'image0'..'image{frames-1}' to 'image' so
     # one albumentations call transforms every frame of a video alike.
     addtl_img = {}
     for idx in range(frames):
         addtl_img['image{}'.format(idx)] = 'image'
     if augment:
         self.transform = albumentations.Compose(
             [
                 ShiftScaleRotate(p=0.3,
                                  scale_limit=0.25,
                                  border_mode=1,
                                  rotate_limit=15),
                 HorizontalFlip(p=0.2),
                 RandomBrightnessContrast(
                     p=0.3, brightness_limit=0.25, contrast_limit=0.5),
                 MotionBlur(p=.2),
                 GaussNoise(p=.2),
                 JpegCompression(p=.2, quality_lower=50),
                 Normalize(mean, std)
             ],
             additional_targets=addtl_img)
     else:
         # Validation/inference path: normalization only.
         self.transform = albumentations.Compose([Normalize(mean, std)])
     self.resize = transforms.Resize((size, size))
예제 #6
0
def apply_background(img):
    """Composite a rendered PNG over a random background and augment it.

    Picks a random file from WORK_DIR/backgrounds, resizes it to the
    render's size, pastes the render on top (using its alpha channel as
    mask), saves the merged image back over the render, then applies
    photometric augmentations in place. Non-.png filenames are ignored.
    """
    if ".png" in img:
        # The render is read, overwritten with the composite, then
        # overwritten again with the augmented result.
        render_path = WORK_DIR + "/renders/" + img

        # Import the render
        render = Image.open(render_path).convert("RGBA")
        rw, rh = render.size

        # Choose a random background
        background = random.choice(os.listdir(WORK_DIR + "/backgrounds/"))
        background = Image.open(WORK_DIR + "/backgrounds/" +
                                background).convert("RGBA")
        # BUG FIX: originally read `render.size` again; these are meant to
        # be the *background's* dimensions (used by the center-crop below).
        bw, bh = background.size

        # Resize the background to match the render
        background = background.resize((rw, rh))

        # Center crop the background based on the render size
        # background = background.crop(((bw - rw) / 2, (bh - rh)/2, (bw + rw)/2, (bh + rh)/2))

        # Merge the background and the render
        background.paste(render, (0, 0), mask=render)
        background.save(render_path)

        # Augmentation pipeline (renamed from `transforms` to avoid
        # shadowing the commonly-imported torchvision module name).
        aug = albumentations.Compose([
            GaussNoise(),
            # HorizontalFlip(),
            # Rotate(limit=45),
            HueSaturationValue(hue_shift_limit=5,
                               sat_shift_limit=10,
                               val_shift_limit=50),
            RandomBrightnessContrast(),
        ])

        # Apply the transforms to the merged image
        image = imread(render_path, pilmode="RGB")
        image = aug(image=image)

        print(f"Applying background and augmentations to {render_path}")

        imsave(render_path, image["image"])
예제 #7
0
# Balance/split the file paths into train and validation samples.
X, val_X, y, val_y = get_random_sampling(paths, y, val_paths, val_y)

# Class balance summary (label 1 = fake, 0 = real).
print('There are ' + str(y.count(1)) + ' fake train samples')
print('There are ' + str(y.count(0)) + ' real train samples')
print('There are ' + str(val_y.count(1)) + ' fake val samples')
print('There are ' + str(val_y.count(0)) + ' real val samples')

import albumentations
from albumentations.augmentations.transforms import ShiftScaleRotate, HorizontalFlip, Normalize, RandomBrightnessContrast, MotionBlur, Blur, GaussNoise, JpegCompression
# Train-time pipeline: geometric + photometric corruptions, each applied
# with low probability, followed by normalization.
train_transform = albumentations.Compose([
                                          ShiftScaleRotate(p=0.3, scale_limit=0.25, border_mode=1, rotate_limit=25),
                                          HorizontalFlip(p=0.2),
                                          RandomBrightnessContrast(p=0.3, brightness_limit=0.25, contrast_limit=0.5),
                                          MotionBlur(p=.2),
                                          GaussNoise(p=.2),
                                          JpegCompression(p=.2, quality_lower=50),
                                          Normalize()
])
# Validation pipeline: normalization only.
val_transform = albumentations.Compose([
                                          Normalize()
])

train_dataset = ImageDataset(X, y, transform=train_transform)
val_dataset = ImageDataset(val_X, val_y, transform=val_transform)


### Train

import gc
예제 #8
0
	RandomGamma(p=0.2),
	GaussianBlur(p=0.2),
	]),
	Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), p=1.0)
    ])

from albumentations.pytorch.transforms import ToTensor
from albumentations.augmentations.transforms import (RandomRotate90, GaussNoise, HorizontalFlip, RandomCrop, HueSaturationValue, RandomBrightness, RandomContrast, RandomGamma, GaussianBlur, Normalize, Compose)

# Data Upload
print('\n[Phase 1] : Data Preparation')
# BUG FIX: `transform_train` was assigned nothing (a dangling `=` at end of
# line) and the bracket nesting was unbalanced (a stray trailing `])`), so
# this module could not parse. Rebuilt as one valid Compose: an inner
# augmentation Compose, then ToTensor, then Normalize.
# NOTE(review): Normalize *after* ToTensor is unusual for albumentations
# pipelines (ToTensor typically ends the chain) — confirm intended order.
transform_train = Compose([
	Compose([
		RandomRotate90(p=0.2),
		GaussNoise(p=0.2),
		HorizontalFlip(p=0.2),
		RandomCrop(p=0.2),
		HueSaturationValue(p=0.2),
		RandomBrightness(p=0.2),
		RandomContrast(p=0.2),
		RandomGamma(p=0.2),
		GaussianBlur(p=0.2)
	], p=1.0),
	ToTensor(),
	Normalize(mean=mean[dataset], std=std[dataset], p=1.0)
], p=1.0)

transform_test = Compose([
	ToTensor(),
예제 #9
0
def main():
    """Train a deepfake-detection model end to end.

    Parses CLI args, loads train/valid dataframes for the chosen model
    type ('cnn' or 'lrcn'), builds augmentations and data loaders, then
    constructs the model, optimizer and scheduler and hands everything
    to ``train_model``.
    """
    args = arg_parser()

    seed_everything(args.seed)

    # Prefer GPU unless the user forces CPU.
    if cuda.is_available() and not args.cpu:
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")

    print(device)

    if args.model_type == 'cnn':
        if args.preprocess:
            train_df = pd.read_csv('../input/preprocessed_train_df.csv')
            valid_df = pd.read_csv('../input/preprocessed_valid_df.csv')
        else:
            train_df = pd.read_csv('../input/train_df.csv')
            valid_df = pd.read_csv('../input/valid_df.csv')
        valid_sample_num = 40000
    elif args.model_type == 'lrcn':
        if args.preprocess:
            train_df = pd.read_pickle(
                '../input/preprocessed_lrcn_train_df.pkl')
            # BUG FIX: originally re-loaded the *train* pickle here, so
            # validation silently ran on training data. Load the valid
            # split instead (verify the file exists under ../input).
            valid_df = pd.read_pickle(
                '../input/preprocessed_lrcn_valid_df.pkl')
        else:
            train_df = pd.read_pickle('../input/lrcn_train_df.pkl')
            valid_df = pd.read_pickle('../input/lrcn_valid_df.pkl')
        valid_sample_num = 15000
    else:
        # Fail fast: the original fell through and later crashed with a
        # NameError on train_df.
        raise ValueError('unknown model_type: {}'.format(args.model_type))

    print("number of train data {}".format(len(train_df)))
    print("number of valid data {}\n".format(len(valid_df)))

    # Subsample the train set; carve three fixed-seed validation subsets so
    # metric variance across subsets can be monitored during training.
    train_df = train_df.sample(frac=args.train_sample_num,
                               random_state=args.seed).reset_index(drop=True)
    valid_df_sub = valid_df.sample(
        frac=1.0, random_state=42).reset_index(drop=True)[:valid_sample_num]
    valid_df_sub1 = valid_df.sample(
        frac=1.0, random_state=52).reset_index(drop=True)[:valid_sample_num]
    valid_df_sub2 = valid_df.sample(
        frac=1.0, random_state=62).reset_index(drop=True)[:valid_sample_num]
    del valid_df
    gc.collect()

    if args.DEBUG:
        # Tiny slices for a quick smoke-test run.
        train_df = train_df[:1000]
        valid_df_sub = valid_df_sub[:1000]
        valid_df_sub1 = valid_df_sub1[:1000]
        valid_df_sub2 = valid_df_sub2[:1000]

    if args.model_type == 'cnn':
        train_transforms = albumentations.Compose([
            HorizontalFlip(p=0.3),
            #   ShiftScaleRotate(p=0.3, scale_limit=0.25, border_mode=1, rotate_limit=25),
            #   RandomBrightnessContrast(p=0.2, brightness_limit=0.25, contrast_limit=0.5),
            #   MotionBlur(p=0.2),
            GaussNoise(p=0.3),
            JpegCompression(p=0.3, quality_lower=50),
            #   Normalize()
        ])
        valid_transforms = albumentations.Compose([
            HorizontalFlip(p=0.2),
            # Randomly degrade quality on ~22% of validation images.
            albumentations.OneOf([
                JpegCompression(quality_lower=8, quality_upper=30, p=1.0),
                GaussNoise(p=1.0),
            ],
                                 p=0.22),
            #   Normalize()
        ])
    else:
        # lrcn: the dataset handles its own preprocessing.
        train_transforms = None
        valid_transforms = None

    train_loader = build_dataset(args,
                                 train_df,
                                 transforms=train_transforms,
                                 is_train=True)
    batch_num = len(train_loader)
    valid_loader = build_dataset(args,
                                 valid_df_sub,
                                 transforms=valid_transforms,
                                 is_train=False)
    valid_loader1 = build_dataset(args,
                                  valid_df_sub1,
                                  transforms=valid_transforms,
                                  is_train=False)
    valid_loader2 = build_dataset(args,
                                  valid_df_sub2,
                                  transforms=valid_transforms,
                                  is_train=False)

    model = build_model(args, device)

    # Checkpoint path. For lrcn the backbone name is irrelevant (as in the
    # original control flow, where the lrcn path overrode save_path last).
    known_models = ('mobilenet_v2', 'resnet18', 'resnet50', 'resnext',
                    'xception')
    if args.model_type == 'lrcn':
        save_path = os.path.join(args.PATH, 'weights', 'lrcn_best.pt')
    elif args.model in known_models:
        save_path = os.path.join(args.PATH, 'weights',
                                 '{}_best.pt'.format(args.model))
    else:
        # BUG FIX: the original evaluated the bare expression
        # `NotImplementedError` without raising it, then continued with
        # save_path unbound.
        raise NotImplementedError(args.model)

    optimizer = build_optimizer(args, model)
    scheduler = build_scheduler(args, optimizer, batch_num)

    train_cfg = {
        'train_loader': train_loader,
        'valid_loader': valid_loader,
        'valid_loader1': valid_loader1,
        'valid_loader2': valid_loader2,
        'model': model,
        'criterion': nn.BCEWithLogitsLoss(),
        'optimizer': optimizer,
        'scheduler': scheduler,
        'save_path': save_path,
        'device': device
    }

    train_model(args, train_cfg)
예제 #10
0
def build_databunch(data_dir, img_sz, batch_sz, class_names, rare_classes,
                    desired_prob, augmentors):
    """Build a DataBunch with optional oversampling of rare classes.

    Expects ``data_dir`` to hold ``train`` and ``valid`` subfolders laid
    out for ImageFolder. Augmentors are selected by name; unknown names
    are logged and skipped. When ``rare_classes`` is non-empty, training
    uses a WeightedRandomSampler (and therefore no shuffling) so that
    rare classes appear with ``desired_prob``.

    NOTE(review): ``img_sz`` is accepted but unused here — kept for
    signature compatibility with callers.
    """
    num_workers = 4

    train_dir = join(data_dir, 'train')
    valid_dir = join(data_dir, 'valid')

    # Name -> transform registry for the configurable augmentors.
    augmentors_dict = {
        'Blur': Blur(),
        'RandomRotate90': RandomRotate90(),
        'HorizontalFlip': HorizontalFlip(),
        'VerticalFlip': VerticalFlip(),
        'GaussianBlur': GaussianBlur(),
        'GaussNoise': GaussNoise(),
        'RGBShift': RGBShift(),
        'ToGray': ToGray()
    }

    aug_transforms = []
    for augmentor in augmentors:
        if augmentor in augmentors_dict:
            aug_transforms.append(augmentors_dict[augmentor])
        else:
            # BUG FIX: the original backslash continuation inside the
            # string literal embedded indentation spaces into the logged
            # message; implicit concatenation keeps it clean.
            log.warning(
                '{0!r} is an unknown augmentor. Continuing without {0!r}. '
                'Known augmentors are: {1}'.format(
                    augmentor, list(augmentors_dict.keys())))
    # Use a distinct name instead of rebinding the list variable.
    aug_transform = Compose(aug_transforms)

    train_ds = AlbumentationDataset(ImageFolder(train_dir,
                                                classes=class_names),
                                    transform=aug_transform)
    valid_ds = AlbumentationDataset(ImageFolder(valid_dir,
                                                classes=class_names))

    if rare_classes:  # idiom: truthiness instead of `!= []`
        targets = [target for _, target in train_ds.orig_dataset.imgs]
        train_sample_weights = calculate_oversampling_weights(
            targets, rare_classes, desired_prob)
        num_train_samples = len(train_ds)
        train_sampler = WeightedRandomSampler(weights=train_sample_weights,
                                              num_samples=num_train_samples,
                                              replacement=True)
        # DataLoader forbids shuffle=True together with a sampler.
        shuffle = False
    else:
        train_sampler = None
        shuffle = True

    train_dl = DataLoader(train_ds,
                          shuffle=shuffle,
                          batch_size=batch_sz,
                          num_workers=num_workers,
                          drop_last=True,
                          pin_memory=True,
                          sampler=train_sampler)
    valid_dl = DataLoader(valid_ds,
                          batch_size=batch_sz,
                          num_workers=num_workers,
                          pin_memory=True)

    return DataBunch(train_ds, train_dl, valid_ds, valid_dl, class_names)
            fake_img = self.transforms(image=fake_img)
            fake_img = fake_img['image']
        
        real_img = np.rollaxis(real_img, 2, 0)            
        fake_img = np.rollaxis(fake_img, 2, 0)
                    
        return (real_img, torch.tensor(0, dtype=torch.float32)), (fake_img, torch.tensor(1, dtype=torch.float32)) 



# Train-time pipeline: light photometric corruptions (flip, noise, JPEG
# artifacts). Commented entries are alternatives that were tried.
train_transforms = albumentations.Compose([
    # ShiftScaleRotate(p=0.3, scale_limit=0.25, border_mode=1, rotate_limit=25),
    HorizontalFlip(p=0.3),
    # RandomBrightnessContrast(p=0.2, brightness_limit=0.25, contrast_limit=0.5),
    # MotionBlur(p=0.2),
    GaussNoise(p=0.3),
    JpegCompression(p=0.3, quality_lower=50),
    Normalize(),
])

# Validation pipeline: flip, plus one of two quality degradations applied
# to ~22% of images, then normalization.
valid_transforms = albumentations.Compose([
    HorizontalFlip(p=0.2),
    albumentations.OneOf([
        JpegCompression(quality_lower=8, quality_upper=30, p=1.0),
        # Downscale(scale_min=0.25, scale_max=0.75, p=1.0),
        GaussNoise(p=1.0),
    ], p=0.22),
    Normalize(),
])