コード例 #1
0
    def __init__(self, n, m):
        """RandAugment-style policy: ``n`` ops are drawn from a fixed pool,
        with one shared magnitude ``m`` (RandAugment's 0..30 scale) that
        scales the strength of every op.

        Args:
            n: number of augmentations applied per image; must not exceed
               the size of the candidate pool (asserted below).
            m: global magnitude on the 0..30 RandAugment scale.
        """
        self.n = n
        self.m = m

        # Normalize the magnitude to [0, 1]; each op's range is scaled by it.
        m_ratio = self.m / 30.0
        self.augment_list = (
            A.CLAHE(always_apply=True),
            A.Equalize(always_apply=True),
            A.InvertImg(always_apply=True),
            A.Rotate(limit=30 * m_ratio, always_apply=True),
            A.Posterize(num_bits=int(4 * m_ratio), always_apply=True),
            # NOTE(review): Solarize's threshold is in pixel units (0..255);
            # passing the 0..1 ratio directly solarizes nearly every pixel --
            # confirm this is intended.
            A.Solarize(threshold=m_ratio, always_apply=True),
            A.RGBShift(r_shift_limit=110 * m_ratio,
                       g_shift_limit=110 * m_ratio,
                       b_shift_limit=110 * m_ratio,
                       always_apply=True),
            A.HueSaturationValue(hue_shift_limit=20 * m_ratio,
                                 sat_shift_limit=30 * m_ratio,
                                 val_shift_limit=20 * m_ratio,
                                 always_apply=True),
            A.RandomContrast(limit=m_ratio, always_apply=True),
            A.RandomBrightness(limit=m_ratio, always_apply=True),
            #  A.Sharpen(always_apply=True), 0.1, 1.9),
            # Horizontal-only translation (y shift forced to 0).
            A.ShiftScaleRotate(shift_limit=0.3 * m_ratio,
                               shift_limit_y=0,
                               rotate_limit=0,
                               always_apply=True),
            # Vertical-only translation (x shift forced to 0).
            A.ShiftScaleRotate(shift_limit=0.3 * m_ratio,
                               shift_limit_x=0,
                               rotate_limit=0,
                               always_apply=True),
            A.Cutout(num_holes=int(8 * m_ratio), always_apply=True),
            A.IAAAffine(shear=0.3 * m_ratio, always_apply=True))

        # Cannot sample more distinct ops per image than the pool provides.
        assert self.n <= len(self.augment_list)
コード例 #2
0
 def __init__(self, noisy=False, p=1):
     """Assemble the color augmentor list and hand it to the base class.

     The basic set jitters brightness/contrast/hue/saturation and the
     histogram; ``noisy`` additionally enables the blur / noise /
     compression group.  ``p`` is forwarded to the base class.
     """
     base_ops = [
         RandomBrightness(p=0.25),
         RandomContrast(p=0.25),
         RandomHue(p=0.25),
         RandomSaturation(p=0.25),
         RandomEqualize(p=0.0625),
         RandomAutoContrast(p=0.0625),
         RandomAlpha(p=0.25),
     ]
     # The noisy group is only constructed when requested.
     noisy_ops = [
         RandomPosterize(p=0.0625),
         A.Solarize(threshold=(50, 255 - 50), p=0.0625),
         RandomBlur(p=0.125),
         RandomUnsharpMask(p=0.125),
         A.IAASharpen(alpha=(0, 0.5), p=0.125),
         GaussNoise(p=0.125),
         SpeckleNoise(p=0.125),
         A.ISONoise(color_shift=(0, 0.05), intensity=(0, 0.5), p=0.125),
         A.JpegCompression(quality_lower=50, quality_upper=100, p=0.125),
     ] if noisy else []
     super().__init__(base_ops + noisy_ops, p=p)
コード例 #3
0
def get_transform(stage, gb_prob=1.0, solarize_prob=0.):
    """Build an albumentations pipeline for the given training stage.

    Args:
        stage: 'train'/'val' -> heavy SimCLR-style augmentation;
               'ft'          -> random crop + flip only;
               'test'        -> deterministic resize + center crop.
               Any other value yields an empty (identity) Compose.
        gb_prob: probability of the Gaussian blur in the train/val pipeline.
        solarize_prob: probability of solarization in the train/val pipeline.

    Returns:
        An ``A.Compose`` ending in ImageNet normalization + tensor conversion
        (for the recognized stages).
    """
    # Every recognized stage finishes with the same normalize/tensor tail.
    tail = [
        A.Normalize(mean=[0.485, 0.456, 0.406],
                    std=[0.229, 0.224, 0.225]),
        ToTensorV2(),
    ]

    if stage in ('train', 'val'):
        steps = [
            A.RandomResizedCrop(224, 224),
            A.HorizontalFlip(p=0.5),
            A.RandomBrightnessContrast(brightness_limit=0.4, contrast_limit=0.4, p=0.8),
            A.HueSaturationValue(hue_shift_limit=int(0.1 * 255),
                                 sat_shift_limit=int(0.2 * 255),
                                 val_shift_limit=0, p=0.8),
            A.ToGray(p=0.2),
            A.GaussianBlur(blur_limit=[23, 23], sigma_limit=[0.1, 0.2], p=gb_prob),
            A.Solarize(p=solarize_prob),
        ] + tail
    elif stage == 'ft':
        steps = [
            A.RandomResizedCrop(224, 224),
            A.HorizontalFlip(p=0.5),
        ] + tail
    elif stage == 'test':
        steps = [
            A.Resize(256, 256),
            A.CenterCrop(224, 224),
        ] + tail
    else:
        steps = []
    return A.Compose(steps)
    def __random_transform(self, img, masks):
        """Apply a random geometric + photometric augmentation to an
        image/mask pair, keeping the two spatially aligned.

        Args:
            img: input image array (cast to uint8 before augmentation).
            masks: segmentation mask(s) passed through the same spatial
                transforms via albumentations' ``mask`` target.

        Returns:
            (aug_img, aug_masks) tuple of the augmented image and mask(s).
        """
        # composition = albu.Compose([
        #     albu.HorizontalFlip(),
        #     albu.VerticalFlip(),
        #     albu.ShiftScaleRotate(rotate_limit=45, shift_limit=0.15, scale_limit=0.15)
        # ])

        composition = albu.Compose(
            [
                # albu.OneOf([albu.RandomSizedCrop(min_max_height=(self.reshape[0]//2, self.reshape[0]),
                #                                  height=self.reshape[0], width=self.reshape[1], w2h_ratio=1.5,
                #                                  p=0.5),
                #       albu.PadIfNeeded(min_height=self.reshape[0], min_width=self.reshape[1], p=0.5)], p=0.3),
                # albu.OneOf([
                #     albu.CropNonEmptyMaskIfExists(self.reshape[0], self.reshape[1]),
                #     albu.RandomCrop(self.reshape[0],self.reshape[1]),
                #     albu.Resize(self.reshape[0],self.reshape[1])
                #
                # ], p=1.0),
                # Random crop-and-resize back to self.reshape, preserving
                # the target aspect ratio.
                albu.RandomSizedCrop(
                    min_max_height=(self.reshape[0] // 2, self.reshape[0]),
                    height=self.reshape[0],
                    width=self.reshape[1],
                    w2h_ratio=self.reshape[1] / self.reshape[0],
                    p=0.3),
                albu.HorizontalFlip(),
                albu.VerticalFlip(),
                albu.ShiftScaleRotate(
                    rotate_limit=45, shift_limit=0.15, scale_limit=0.15),
                # One non-rigid warp, 30% of the time.
                albu.OneOf([
                    albu.ElasticTransform(
                        alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03),
                    albu.GridDistortion(),
                    albu.OpticalDistortion(distort_limit=2, shift_limit=0.5)
                ],
                           p=0.3),
                # One photometric change (image only; masks are untouched
                # by pixel-level transforms), 50% of the time.
                albu.OneOf([
                    albu.RandomBrightnessContrast(),
                    albu.RandomGamma(),
                    albu.Solarize()
                ],
                           p=0.5)
            ],
            p=1)

        composed = composition(image=img.astype(np.uint8), mask=masks)
        aug_img = composed['image']
        aug_masks = composed['mask']

        return aug_img, aug_masks
コード例 #5
0
 def __init__(self):
     """Build the fixed augmentation policy (``self.policy``) applied to
     each sample; each ``OneOf`` group fires independently with its own
     probability, so an image may receive several perturbations at once.
     """
     self.policy = A.Compose([
         # Orientation: rotate up to 180 deg or flip, 30% of the time.
         A.OneOf([
             A.Rotate(180),
             A.Flip(),
         ], p=0.3),
         A.ShiftScaleRotate(shift_limit=0.2, scale_limit=0.5, rotate_limit=0, p=0.2),
         # Occlusion: rectangular holes or a dropped grid.
         A.OneOf([
             A.CoarseDropout(max_holes=16, max_height=16, max_width=16, p=0.3),
             A.GridDropout(ratio=0.3, p=0.3),
         ]),
         # Non-rigid warp or synthetic fog.
         A.OneOf([
             A.ElasticTransform(sigma=10, alpha_affine=25, p=0.3),
             A.RandomFog(fog_coef_lower=0.2, fog_coef_upper=0.7, p=0.2),
         ], p=0.2),
         # Additive noise variants.
         A.OneOf([
             A.IAAAdditiveGaussianNoise(),
             A.GaussNoise(),
             A.ISONoise()
         ], p=0.2),
         # Blur variants.
         A.OneOf([
             A.MotionBlur(p=.3),
             A.MedianBlur(blur_limit=5, p=0.3),
             A.Blur(blur_limit=5, p=0.3),
             A.GaussianBlur(p=0.3)
         ], p=0.2),
         # Color-space perturbations.
         A.OneOf([
             A.ChannelShuffle(p=.3),
             A.HueSaturationValue(p=0.3),
             A.ToGray(p=0.3),
             A.ChannelDropout(p=0.3),
             A.InvertImg(p=0.1)
         ], p=0.2),
         # Geometric distortions.
         A.OneOf([
             A.OpticalDistortion(p=0.3),
             A.GridDistortion(p=.2),
             A.IAAPiecewiseAffine(p=0.3),
         ], p=0.2),
         # Local contrast / sharpening / embossing.
         A.OneOf([
             A.CLAHE(clip_limit=2),
             A.IAASharpen(),
             A.IAAEmboss(),
         ], p=0.2),
         A.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3, p=0.3),
         A.Solarize(p=0.2),
     ])
コード例 #6
0
 def __init__(self, k: int = 5, always_apply: bool = True, p: float = 1.0):
     """RandAugment-style candidate pool for albumentations.

     Args:
         k: number of candidate ops per image (consumed elsewhere in the
            class -- presumably by its apply method; confirm).
         always_apply: standard albumentations flag, forwarded to the base.
         p: overall application probability, forwarded to the base.
     """
     super(RandAugmentAlb, self).__init__(always_apply, p)
     self.k = k
     # Each candidate uses p=1.0 so firing is decided by the sampler,
     # not by the individual transform.
     self.candidates = [
         AutoContrast(p=1.0),
         A.Equalize(p=1.0),
         A.InvertImg(p=1.0),
         Rotate(30., p=1.0),
         A.Posterize([4, 8], p=1.0),
         A.Solarize([0, 256], p=1.0),
         # Contrast-only jitter (brightness frozen at 0).
         A.RandomBrightnessContrast(brightness_limit=0.,
                                    contrast_limit=(0.05, 0.95),
                                    p=1.0),
         # Brightness-only jitter (contrast frozen at 0).
         A.RandomBrightnessContrast(brightness_limit=(0.05, 0.95),
                                    contrast_limit=0.,
                                    p=1.0),
         ShearX(0.3),
         ShearY(0.3),
         Translate(0.45),
     ]
コード例 #7
0
def get_training_augmentation3():
    """Return the list of training-time transforms (not yet composed).

    Geometry (shift/scale/rotate, optical distortion, elastic warp) pads
    with a constant zero border; solarize and horizontal flip each fire
    half of the time.
    """
    shift_scale_rotate = albu.ShiftScaleRotate(
        shift_limit=0.05,
        scale_limit=0.1,
        rotate_limit=15,
        border_mode=cv2.BORDER_CONSTANT,
        value=0,
    )
    optical = albu.OpticalDistortion(
        distort_limit=0.11,
        shift_limit=0.15,
        border_mode=cv2.BORDER_CONSTANT,
        value=0,
    )
    elastic = albu.ElasticTransform(
        p=0.5, alpha=120, sigma=120 * 0.05, alpha_affine=120 * 0.03
    )
    solarize = albu.Solarize(threshold=128, always_apply=False, p=0.5)
    flip = albu.HorizontalFlip(p=0.5)
    return [shift_scale_rotate, optical, elastic, solarize, flip]
コード例 #8
0
    def __init__(
        self,
        transforms=None,
        mean=(0, 0, 0),
        std=(1, 1, 1),
        width=3,
        depth=-1,
        alpha=1.,
        p=1.,
    ):
        """AugMix-style augmenter configuration.

        Args:
            transforms: optional op list; when ``None`` the default pool
                below is installed (geometric shears/shifts plus
                ImageNet-C-like color/contrast ops).
            mean: per-channel mean -- presumably used by a normalization/
                mixing step elsewhere in the class; confirm.
            std: per-channel std (same caveat as ``mean``).
            width: number of parallel augmentation chains -- AugMix
                terminology; confirm against the calling code.
            depth: ops per chain; -1 presumably means "sample the depth".
            alpha: mixing-distribution concentration parameter.
            p: probability of applying the augmenter at all.
        """
        self.transforms = transforms
        self.mean = mean
        self.std = std
        self.width = width
        self.depth = depth
        self.alpha = alpha
        self.p = p

        # Default op pool, used only when the caller did not supply one.
        if self.transforms is None:
            self.transforms = [
                AutoContrast(cutoff=0, p=1),
                albu.Equalize(mode='pil', p=1),
                albu.Posterize(num_bits=(3, 4), p=1),
                albu.ShiftScaleRotate(shift_limit=0, scale_limit=0, rotate_limit=5, border_mode=cv2.BORDER_CONSTANT, value=(0, 0, 0), p=1),  # rotate
                albu.Solarize(threshold=77, p=1),
                RandomShear(shear_x=0.09, shear_y=0, border_mode=cv2.BORDER_CONSTANT, value=(0, 0, 0), p=1),
                RandomShear(shear_x=0, shear_y=0.09, border_mode=cv2.BORDER_CONSTANT, value=(0, 0, 0), p=1),
                VerticalShift(shift_limit=0.09, border_mode=cv2.BORDER_CONSTANT, value=(0, 0, 0), p=1),
                HorizontalShift(shift_limit=0.09, border_mode=cv2.BORDER_CONSTANT, value=(0, 0, 0), p=1),
                # ImageNet-C
                albu.HueSaturationValue(hue_shift_limit=0, sat_shift_limit=(-36, 0), val_shift_limit=0, p=1),  # saturation
                albu.RandomContrast(limit=(-0.36, 0), p=1),
                albu.RandomBrightness(limit=(-0.36, 0), p=1),
                albu.OneOf([  # sharpness
                    albu.IAASharpen(alpha=(0.1, 0.5), lightness=0, p=1),
                    albu.Blur(blur_limit=7, p=1),
                ], p=0.5),
            ]
コード例 #9
0
 def __init__(self, noisy: bool = False, grayscale: bool = False, p=1):
     """Assemble the color augmentor set and forward it to the base class.

     ``noisy`` adds histogram / blur / noise ops; for non-grayscale input
     it also enables sensor (ISO) noise, which perturbs color channels.
     """
     ops = [
         RandomBrightness(p=0.25),
         RandomContrast(p=0.25),
         RandomHue(p=0.25),
         RandomSaturation(p=0.25),
         RandomAlpha(p=0.25),
     ]
     if noisy:
         ops += [
             RandomEqualize(p=0.0625),
             RandomAutoContrast(p=0.0625),
             RandomPosterize(p=0.0625),
             A.Solarize(threshold=(50, 255 - 50), p=0.0625),
             RandomBlur(p=0.125),
             RandomUnsharpMask(p=0.125),
             GaussNoise(p=0.125),
         ]
         # ISO noise shifts colors, so it is skipped for grayscale data.
         if not grayscale:
             ops.append(
                 A.ISONoise(color_shift=(0, 0.05), intensity=(0, 0.5), p=0.125))
     super().__init__(ops, p=p)
コード例 #10
0
def choco_transform(p):
    """Compose the augmentation pipeline with overall probability ``p``.

    The pipeline is replicated onto extra targets ``image1``..``image100``
    so a whole stack of frames receives identical augmentation parameters.
    """
    extra_targets = {f'image{i}': 'image' for i in range(1, 101)}
    steps = [
        # Flips
        albu.HorizontalFlip(p=0.5),
        albu.VerticalFlip(p=0.5),
        # Shift, scale, rotate -- padded with a constant white border.
        albu.ShiftScaleRotate(rotate_limit=0,
                              scale_limit=0.15,
                              border_mode=cv2.BORDER_CONSTANT,
                              value=[255, 255, 255],
                              p=0.5),
        # Noise
        albu.IAAAdditiveGaussianNoise(p=0.2),
        # Color
        albu.Solarize(p=0.2),
        albu.ToGray(p=0.2),
        # Contrast/brightness: exactly one of the three, half of the time.
        albu.OneOf([
            albu.RandomGamma(p=1),
            albu.RandomContrast(p=1),
            albu.RandomBrightness(p=1),
        ], p=0.5),
    ]
    return albu.Compose(steps, p=p, additional_targets=extra_targets)
コード例 #11
0
ファイル: fixmatch.py プロジェクト: AdeelH/ssl-experiments
                   apply_transform, validate)
from ema import ModelEMA

# Module-level logging: everything at INFO and above goes to stdout.
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
log = logging.getLogger()

# Weak view for FixMatch: flip + mild affine, then the shared base
# transform (``base_tf`` is defined elsewhere in this module).
weak_aug_tf = A.Compose([
    A.HorizontalFlip(p=0.5),
    A.ShiftScaleRotate(scale_limit=0, rotate_limit=12.5, p=0.5), base_tf
])

# Strong view: affine, one random photometric op, and cutout, then base_tf.
strong_aug_tf = A.Compose([
    A.ShiftScaleRotate(),
    A.OneOf([
        A.CLAHE(),
        A.Solarize(),
        A.ColorJitter(),
        A.ToGray(),
        A.ToSepia(),
        A.RandomBrightness(),
        A.RandomGamma(),
    ]),
    A.CoarseDropout(max_height=4, max_width=4, max_holes=3, p=0.25), base_tf
])


def train_epoch_fixmatch(epoch,
                         model,
                         ema_model,
                         train_dl_l,
                         train_dl_ul,
コード例 #12
0
def main_worker(gpu, ngpus_per_node, args):
    """Per-process entry point for MoCo pre-training.

    Sets up the distributed process group, builds the MoCo model, loss, and
    optimizer, optionally resumes from a checkpoint, constructs the
    two-crop augmented ImageNet loader, and runs the epoch loop,
    checkpointing after every epoch on the master rank.

    Args:
        gpu: local GPU index assigned to this process (may be None).
        ngpus_per_node: GPUs on this node; used to derive the global rank
            and to split batch size / worker count per process.
        args: parsed command-line namespace (distributed flags, paths,
            hyperparameters).
    """
    args.gpu = gpu

    # suppress printing if not master
    if args.multiprocessing_distributed and args.gpu != 0:

        def print_pass(*args):
            pass

        builtins.print = print_pass

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)
    # create model
    print("=> creating model '{}'".format(args.arch))
    model = moco.builder.MoCo(models.__dict__[args.arch], args.moco_dim,
                              args.moco_k, args.moco_m, args.moco_t, args.mlp)
    #print(model)

    if args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(
                (args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
        # comment out the following line for debugging
        raise NotImplementedError("Only DistributedDataParallel is supported.")
    else:
        # AllGather implementation (batch shuffle, queue update, etc.) in
        # this code only supports DistributedDataParallel.
        raise NotImplementedError("Only DistributedDataParallel is supported.")

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    if args.aug_plus:
        # MoCo v2's aug: similar to SimCLR https://arxiv.org/abs/2002.05709
        augmentation = [
            transforms.RandomResizedCrop(224, scale=(0.2, 1.)),
            transforms.RandomApply(
                [
                    transforms.ColorJitter(0.4, 0.4, 0.4,
                                           0.1)  # not strengthened
                ],
                p=0.8),
            transforms.RandomGrayscale(p=0.2),
            transforms.RandomApply([moco.loader.GaussianBlur([.1, 2.])],
                                   p=0.5),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize
        ]
    else:
        # MoCo v1's aug: the same as InstDisc https://arxiv.org/abs/1805.01978
        # NOTE(review): this branch yields an albumentations Compose while
        # the aug_plus branch yields a bare torchvision list -- confirm
        # TwoCropsTransform accepts both forms.
        augmentation = A.Compose([
            A.RandomResizedCrop(always_apply=False,
                                p=1,
                                height=224,
                                width=224,
                                scale=(0.08, 1.0),
                                ratio=(0.75, 1.33333333),
                                interpolation=2),
            A.ColorJitter(0.4, 0.4, 0.4, 0.1, False, 0.8),
            A.HorizontalFlip(p=0.5),
            A.ToGray(p=0.2),
            A.Solarize(p=0.2),
            A.CLAHE(p=0.2),
            A.RandomBrightness(p=0.2),
            A.Normalize(mean=(0.4914, 0.4822, 0.4465),
                        std=(0.2471, 0.2435, 0.2616),
                        max_pixel_value=255.0,
                        always_apply=True,
                        p=1.0),
            ToTensorV2()
        ])

    # TwoCropsTransform presumably produces two independently augmented
    # views (query/key) per image -- see moco.loader2.
    train_dataset = ImageFolder(traindir,
                                moco.loader2.TwoCropsTransform(augmentation))

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=(train_sampler is None),
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=train_sampler,
                                               drop_last=True)

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args)

        if not args.multiprocessing_distributed or (
                args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                },
                is_best=False,
                filename='checkpoint_{:04d}.pth.tar'.format(epoch))
コード例 #13
0
def get_aug_trans(use_color_aug,
                  use_weak_shape_aug,
                  use_strong_shape_aug,
                  mean=(0.5, 0.5, 0.5),
                  std=(0.5, 0.5, 0.5)):
    """Build the (tensor, color-aug, shape-aug) transform triple.

    Args:
        use_color_aug: when truthy, enable the photometric pipeline below;
            otherwise ``c_aug`` is None.
        use_weak_shape_aug: enable horizontal flip as the shape augmentation
            (only consulted when the strong variant is off).
        use_strong_shape_aug: enable the strong shape augmentation; takes
            precedence over the weak one.
        mean: normalization mean applied after ToTensor.
        std: normalization std applied after ToTensor.

    Returns:
        (transform, c_aug, shape_aug): a torchvision to-tensor+normalize
        transform, an optional albumentations color pipeline, and an
        optional shape augmentation (callable or None).
    """
    transform = transforms.Compose([
        transforms.ToTensor(),  # range [0, 255] -> [0.0,1.0]
        # range [0.0, 1.0] -> [-1.0,1.0]
        transforms.Normalize(mean=mean, std=std)
    ])

    if use_color_aug:
        c_aug = A.Compose([
            A.RandomBrightnessContrast(p=0.7,
                                       brightness_limit=0.2,
                                       contrast_limit=0.2),
            A.CoarseDropout(p=0.5,
                            max_holes=8,
                            max_height=16,
                            max_width=16,
                            min_height=8,
                            min_width=8,
                            fill_value=0),
            # One blur variant, half of the time.
            A.OneOf([
                A.Blur(p=1, blur_limit=7),
                A.MotionBlur(p=1, blur_limit=7),
                A.MedianBlur(p=1, blur_limit=7),
                A.GaussianBlur(p=1, blur_limit=7)
            ],
                    p=0.5),
            # One noise/gamma variant, 30% of the time.
            A.OneOf([
                A.RandomGamma(p=1, gamma_limit=(80, 120)),
                A.GaussNoise(p=1, var_limit=(10.0, 50.0)),
                A.ISONoise(p=1, color_shift=(0.01, 0.05),
                           intensity=(0.1, 0.5)),
            ],
                    p=0.3),
            # One color-space perturbation, half of the time.
            A.OneOf(
                [
                    A.HueSaturationValue(p=1,
                                         hue_shift_limit=20,
                                         sat_shift_limit=30,
                                         val_shift_limit=20),
                    A.RGBShift(p=1,
                               r_shift_limit=20,
                               g_shift_limit=20,
                               b_shift_limit=20),
                    A.CLAHE(p=1, clip_limit=4.0, tile_grid_size=(8, 8)),
                    # A.ChannelShuffle(p=0.5),
                    # A.InvertImg(p=0.5),
                    A.Solarize(p=1, threshold=128),
                ],
                p=0.5),
            A.JpegCompression(quality_lower=10, quality_upper=30, p=0.3),
        ])
    else:
        c_aug = None

    # crop_size = (args.train_input_h, args.train_input_w)

    if use_strong_shape_aug:
        shape_aug = get_strong_shape_aug()
    elif use_weak_shape_aug:
        shape_aug = horizontal_flip
    else:
        shape_aug = None

    return transform, c_aug, shape_aug
コード例 #14
0
def main_worker(gpu, ngpus_per_node, args):
    """Per-process entry point for PixPro self-supervised pre-training.

    Sets up the distributed process group, builds the PixPro model with
    synchronized batch norm, the consistency loss and optimizer, optionally
    resumes from a checkpoint, constructs the two-view contrastive data
    pipeline, and runs the epoch loop with optional mixed precision,
    checkpointing on the master rank.

    Args:
        gpu: local GPU index assigned to this process (may be None).
        ngpus_per_node: GPUs on this node; used to derive the global rank
            and to split batch size / worker count per process.
        args: parsed command-line namespace (distributed flags, PixPro
            hyperparameters, paths).
    """
    args.gpu = gpu

    # suppress printing if not master
    if args.multiprocessing_distributed and args.gpu != 0:

        def print_pass(*args):
            pass

        builtins.print = print_pass

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size,
                                rank=args.rank)
    # create model
    print("=> creating model '{}'".format(args.arch))
    model = PixPro(models.__dict__[args.arch], args.pixpro_mom,
                   args.ppm_layers, args.ppm_gamma)

    if args.distributed:
        #hopefully this is the right place to do this:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)

        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int(
                (args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(
                model, device_ids=[args.gpu], find_unused_parameters=True)
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)

    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
        # comment out the following line for debugging
        raise NotImplementedError("Only DistributedDataParallel is supported.")
    else:
        # AllGather implementation (batch shuffle, queue update, etc.) in
        # this code only supports DistributedDataParallel.
        raise NotImplementedError("Only DistributedDataParallel is supported.")

    #define loss criterion and optimizer
    criterion = ConsistencyLoss(distance_thr=args.pixpro_t).cuda(args.gpu)

    optimizer = configure_optimizer(model, args)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    #physical space only
    # Spatial transforms shared by both views; the same crop/flip is also
    # applied to the coordinate grids so pixel correspondence is preserved.
    space_tfs = A.Compose([A.RandomResizedCrop(224, 224),
                           A.HorizontalFlip()],
                          additional_targets={
                              'grid_y': 'image',
                              'grid_x': 'image'
                          })

    #could work for both views
    view1_color_tfs = A.Compose([
        A.ColorJitter(0.4, 0.4, 0.2, 0.1, p=0.8),
        A.ToGray(p=0.2),
        A.GaussianBlur(blur_limit=23, sigma_limit=(0.1, 2.0), p=1.0),
        A.Normalize(),
        ToTensorV2()
    ])

    #technically optional, but used in the BYOL paper
    view2_color_tfs = A.Compose([
        A.ColorJitter(0.4, 0.4, 0.2, 0.1, p=0.8),
        A.ToGray(p=0.2),
        A.GaussianBlur(blur_limit=23, sigma_limit=(0.1, 2.0), p=0.1),
        A.Solarize(p=0.2),
        A.Normalize(),
        ToTensorV2()
    ])

    train_dataset = ContrastData(args.data, space_tfs, view1_color_tfs,
                                 view2_color_tfs)

    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset)
    else:
        train_sampler = None

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=args.batch_size,
                                               shuffle=(train_sampler is None),
                                               num_workers=args.workers,
                                               pin_memory=True,
                                               sampler=train_sampler,
                                               drop_last=True)

    #encoder momentum is updated by STEP and not EPOCH
    args.train_steps = args.epochs * len(train_loader)
    args.current_step = args.start_epoch * len(train_loader)

    if args.fp16:
        scaler = GradScaler()
    else:
        scaler = None

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            train_sampler.set_epoch(epoch)

        adjust_learning_rate(optimizer, epoch, args)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, scaler, epoch, args)

        if not args.multiprocessing_distributed or (
                args.multiprocessing_distributed
                and args.rank % ngpus_per_node == 0):
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                },
                is_best=False,
                filename=os.path.join(
                    args.model_dir, 'checkpoint_{:04d}.pth.tar'.format(epoch)))
コード例 #15
0
    def __call__(self, data):
        """Apply one randomly chosen albumentations transform to the RGB frame.

        ``data`` is a ``(rgb, thermal, depth, audio, label, id)`` tuple; only
        ``rgb`` (and its box annotations carried in ``label``) are augmented,
        the other modalities pass through untouched.

        Bug fix vs. the original: both try-blocks referenced an undefined
        local ``annots`` (guaranteed UnboundLocalError whenever any transform
        was drawn), the pixel branch stored the augmented boxes into
        ``annots`` instead of ``label`` (silently dropping them), and the
        except handler referenced ``aug_result`` which could itself be
        unbound.  The annotations are now derived from ``label`` and written
        back to ``label`` through a single shared code path.

        NOTE(review): this assumes ``label`` is an (N, 5) array-like of
        ``[x_min, y_min, x_max, y_max, class_id]`` rows (pascal_voc format)
        -- confirm against the dataset that produces ``data``.

        Returns:
            The (possibly augmented) ``(rgb, thermal, depth, audio, label,
            id)`` tuple.
        """
        rgb, thermal, depth, audio, label, sample_id = data

        rgb = rgb.astype(np.float32)
        height, width, _ = rgb.shape
        # Pixel-level transforms: alter colors/noise but never box geometry.
        albumentations_transform_pixel = {
            'Blur': albumentations.Blur(),
            #'CLAHE':albumentations.CLAHE(),
            'ChannelDropout': albumentations.ChannelDropout(),
            'ChannelShuffle': albumentations.ChannelShuffle(),
            'CoarseDropout': albumentations.CoarseDropout(),
            #'Equalize':albumentations.Equalize(),
            #'FancyPCA':albumentations.FancyPCA(),
            'GaussNoise': albumentations.GaussNoise(),
            'GaussianBlur': albumentations.GaussianBlur(),
            #'GlassBlur':albumentations.GlassBlur(),
            'HueSaturationValue': albumentations.HueSaturationValue(),
            'IAAAdditiveGaussianNoise':
            albumentations.IAAAdditiveGaussianNoise(),
            #'ISONoise':albumentations.ISONoise(),
            'RGBShift': albumentations.RGBShift(),
            'RandomBrightnessContrast':
            albumentations.RandomBrightnessContrast(),
            'RandomFog': albumentations.RandomFog(),
            #'RandomGamma':albumentations.RandomGamma(),
            'RandomRain': albumentations.RandomRain(),
            'RandomShadow': albumentations.RandomShadow(),
            'RandomSnow': albumentations.RandomSnow(),
            'RandomSunFlare': albumentations.RandomSunFlare(),
            'Solarize': albumentations.Solarize(),
        }
        # Spatial transforms that move boxes; currently all disabled.
        albumentations_transform_bbox = {
            #'HorizontalFlip':albumentations.HorizontalFlip(),
            #'VerticalFlip':albumentations.VerticalFlip(),
            #'CenterCrop':albumentations.CenterCrop(height=height-10, width=width-10, p=0.5),
            #'RandomCropNearBBox':albumentations.RandomCropNearBBox(p=0.5),
            #'Crop':albumentations.Crop(x_min=10, y_min =10, y_max=height-10, x_max=width-10, p=0.5),
            #'ElasticTransform':albumentations.ElasticTransform(),
            #'ShiftScaleRotate':albumentations.ShiftScaleRotate(),
        }
        # Draw one transform name uniformly; 'None' means "leave unchanged".
        transform = np.random.choice(
            ['None'] + list(albumentations_transform_pixel.keys()) +
            list(albumentations_transform_bbox.keys()))

        if transform in albumentations_transform_pixel:
            chosen = albumentations_transform_pixel[transform]
        elif transform in albumentations_transform_bbox:
            chosen = albumentations_transform_bbox[transform]
        else:
            # 'None' was drawn: return the sample untouched.
            return rgb, thermal, depth, audio, label, sample_id

        aug = albumentations.Compose(
            [chosen],
            bbox_params={
                'format': 'pascal_voc',
                'label_fields': ['labels']
            })
        # Derive the working annotation array from `label` (was: undefined
        # `annots`).
        annots = np.array(label).astype(np.float32)
        try:
            aug_result = aug(image=rgb,
                             bboxes=annots[:, :4],
                             labels=annots[:, 4])
            rgb = aug_result['image']
            # Write the (possibly clipped) boxes back to `label` so the
            # caller sees them -- the pixel branch previously dropped them.
            label = np.hstack([
                aug_result['bboxes'],
                np.array(aug_result['labels']).reshape(-1, 1)
            ])
        except Exception as e:
            # Report the inputs, not `aug_result`, which may be unbound here.
            print(f"transform={transform} failed for annots={annots}")
            raise Exception(e)

        return rgb, thermal, depth, audio, label, sample_id
コード例 #16
0
def get_transform_imagenet(use_albu_aug):
    """Build the (train, test) transform pair for ImageNet-style 224x224 input.

    Args:
        use_albu_aug: if truthy, the training pipeline is an albumentations
            ``Compose`` with a large randomized augmentation tree; otherwise a
            minimal torchvision pipeline (resize/crop/flip/tensor) is used.

    Returns:
        Tuple ``(train_transform, test_transform)``. The train transform is
        wrapped in ``MultiDataTransformAlbu`` / ``MultiDataTransform``
        (project helpers defined elsewhere) — presumably to produce multiple
        augmented views per sample; confirm against their definitions.

    Note: the commented-out ``p=0.05`` fragments record per-transform
    probabilities that were replaced by the ``p=0.1`` on each ``OneOf`` group.
    """
    if use_albu_aug:
        train_transform = al.Compose([
            # al.Flip(p=0.5),
            # Geometric base: resize then random crop back to 224x224.
            al.Resize(256, 256, interpolation=2),
            al.RandomResizedCrop(224,
                                 224,
                                 scale=(0.08, 1.0),
                                 ratio=(3. / 4., 4. / 3.),
                                 interpolation=2),
            al.HorizontalFlip(),
            # With p=0.5, apply exactly one of the following augmentation
            # groups (each group is itself a OneOf picked with p=0.1).
            al.OneOf(
                [
                    # Geometric distortions.
                    al.OneOf(
                        [
                            al.ShiftScaleRotate(
                                border_mode=cv2.BORDER_CONSTANT,
                                rotate_limit=30),  # , p=0.05),
                            al.OpticalDistortion(
                                border_mode=cv2.BORDER_CONSTANT,
                                distort_limit=5.0,
                                shift_limit=0.1),
                            # , p=0.05),
                            al.GridDistortion(border_mode=cv2.BORDER_CONSTANT
                                              ),  # , p=0.05),
                            al.ElasticTransform(
                                border_mode=cv2.BORDER_CONSTANT,
                                alpha_affine=15),  # , p=0.05),
                        ],
                        p=0.1),
                    # Color / channel manipulations.
                    al.OneOf(
                        [
                            al.RandomGamma(),  # p=0.05),
                            al.HueSaturationValue(),  # p=0.05),
                            al.RGBShift(),  # p=0.05),
                            al.CLAHE(),  # p=0.05),
                            al.ChannelShuffle(),  # p=0.05),
                            al.InvertImg(),  # p=0.05),
                        ],
                        p=0.1),
                    # Weather effects.
                    al.OneOf(
                        [
                            al.RandomSnow(),  # p=0.05),
                            al.RandomRain(),  # p=0.05),
                            al.RandomFog(),  # p=0.05),
                            al.RandomSunFlare(num_flare_circles_lower=1,
                                              num_flare_circles_upper=2,
                                              src_radius=110),
                            # p=0.05, ),
                            al.RandomShadow(),  # p=0.05),
                        ],
                        p=0.1),
                    al.RandomBrightnessContrast(p=0.1),
                    # Noise injection.
                    al.OneOf(
                        [
                            al.GaussNoise(),  # p=0.05),
                            al.ISONoise(),  # p=0.05),
                            al.MultiplicativeNoise(),  # p=0.05),
                        ],
                        p=0.1),
                    # Global tone / palette changes.
                    al.OneOf(
                        [
                            al.ToGray(),  # p=0.05),
                            al.ToSepia(),  # p=0.05),
                            al.Solarize(),  # p=0.05),
                            al.Equalize(),  # p=0.05),
                            al.Posterize(),  # p=0.05),
                            al.FancyPCA(),  # p=0.05),
                        ],
                        p=0.1),
                    # Blurs.
                    al.OneOf(
                        [
                            # al.MotionBlur(blur_limit=1),
                            al.Blur(blur_limit=[3, 5]),
                            al.MedianBlur(blur_limit=[3, 5]),
                            al.GaussianBlur(blur_limit=[3, 5]),
                        ],
                        p=0.1),
                    # Occlusion / dropout.
                    al.OneOf(
                        [
                            al.CoarseDropout(),  # p=0.05),
                            al.Cutout(),  # p=0.05),
                            al.GridDropout(),  # p=0.05),
                            al.ChannelDropout(),  # p=0.05),
                            al.RandomGridShuffle(),  # p=0.05),
                        ],
                        p=0.1),
                    # Quality degradation.
                    al.OneOf(
                        [
                            al.Downscale(),  # p=0.1),
                            al.ImageCompression(quality_lower=60),  # , p=0.1),
                        ],
                        p=0.1),
                ],
                p=0.5),
            al.Normalize(),
            ToTensorV2()
        ])
    else:
        # Plain torchvision baseline augmentation.
        train_transform = transforms.Compose([
            transforms.Resize(256),
            transforms.RandomResizedCrop(224),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
        ])
    # Deterministic evaluation pipeline (no augmentation).
    test_transform = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
    ])

    # Wrap for multi-view output; the albumentations variant needs its own
    # wrapper because its call convention differs from torchvision's.
    if use_albu_aug:
        train_transform = MultiDataTransformAlbu(train_transform)
    else:
        train_transform = MultiDataTransform(train_transform)

    return train_transform, test_transform
コード例 #17
0
ファイル: Gradio Test 1.py プロジェクト: abidlabs/Sign1
def get_train_aug(RESOLUTION=300):
    """Return the training augmentation pipeline.

    Resizes/pads to ``2 * RESOLUTION``, random-crops back to ``RESOLUTION``,
    then applies heavy color, cutout, distortion and noise augmentation.
    """
    padded_size = RESOLUTION * 2
    cutout_fill = [106, 87, 55]

    # Spatial normalization: fit longest side, pad square, random crop.
    sizing = [
        A.LongestMaxSize(max_size=padded_size, interpolation=cv2.INTER_CUBIC,
                         always_apply=True),
        A.PadIfNeeded(min_height=padded_size, min_width=padded_size,
                      always_apply=True, border_mode=cv2.BORDER_CONSTANT),
        A.RandomResizedCrop(RESOLUTION, RESOLUTION, scale=(0.7, 1),
                            interpolation=cv2.INTER_CUBIC),
        A.Resize(RESOLUTION, RESOLUTION, p=1.0,
                 interpolation=cv2.INTER_CUBIC),
    ]

    # Color / photometric jitter and flips.
    photometric = [
        A.FancyPCA(p=0.8, alpha=0.5),
        A.HorizontalFlip(p=0.5),
        A.VerticalFlip(p=0.1),
        A.ShiftScaleRotate(p=0.4, rotate_limit=12),
        A.HueSaturationValue(always_apply=False, p=0.3,
                             hue_shift_limit=(-20, 20),
                             sat_shift_limit=(-30, 30),
                             val_shift_limit=(-20, 20)),
        A.RandomBrightnessContrast(brightness_limit=(-0.5, 0.5),
                                   contrast_limit=(-0.4, 0.4)),
    ]

    # Occlusion: coarse dropout plus two perpendicular cutout passes that
    # always fire, filled with a fixed brownish color.
    occlusion = [
        A.CoarseDropout(p=0.8, max_holes=30),
        A.Cutout(p=1, max_h_size=60, max_w_size=30, num_holes=6,
                 fill_value=cutout_fill),
        A.Cutout(p=1, max_h_size=30, max_w_size=60, num_holes=6,
                 fill_value=cutout_fill),
    ]

    # One of two strong geometric distortions, 60% of the time.
    distortion = A.OneOf([
        A.OpticalDistortion(always_apply=False, p=1.0,
                            distort_limit=(-0.6599999666213989,
                                           0.6800000071525574),
                            shift_limit=(-0.6699999570846558,
                                         0.4599999785423279),
                            interpolation=0, border_mode=0,
                            value=(0, 0, 0), mask_value=None),
        A.GridDistortion(always_apply=False, p=1.0,
                         num_steps=6,
                         distort_limit=(-0.4599999785423279, 0.5),
                         interpolation=0, border_mode=0,
                         value=(0, 0, 0), mask_value=None),
    ], p=0.6)

    # Sharpening plus several noise sources.
    degradation = [
        A.Sharpen(p=1.0, alpha=(0.1, 0.3), lightness=(0.3, 0.9)),
        A.GaussNoise(var_limit=(300.0, 500.0), p=0.4),
        A.ISONoise(always_apply=False, p=0.4,
                   intensity=(0.10000000149011612, 1.399999976158142),
                   color_shift=(0.009999999776482582, 0.4000000059604645)),
        A.OneOf([
            A.Equalize(always_apply=False, p=1.0, mode='cv',
                       by_channels=True),
            A.Solarize(always_apply=False, p=1.0, threshold=(67, 120)),
            A.GaussNoise(p=1.0),
            A.MotionBlur(always_apply=False, p=1.0, blur_limit=(5, 20))
        ], p=0.5),
    ]

    return A.Compose(
        sizing + photometric + occlusion + [distortion] + degradation,
        p=1.0)
コード例 #18
0
def solarize(m, minval=0, maxval=256):
    """Build a Solarize transform with a fixed threshold.

    The magnitude ``m`` is mapped onto ``[minval, maxval)`` by the project
    helper ``M2Level``; the resulting level is used as an exact (degenerate
    range) threshold, and the transform always fires (``p=1``).
    """
    thr = M2Level(m, minval, maxval)
    return albu.Solarize(p=1, threshold=(thr, thr))
コード例 #19
0
    def augment_and_save_images(self):
        """Augment every image/label pair on disk and save the results.

        Walks ``self.img_dir_path`` (labels are matched by filename in
        ``self.label_dir_path``). Every file receives the 'rotate',
        'elastic', 'noise', 'saturate' and 'solarize' augmentations; in
        addition, one transform is chosen at random from each of the pairs
        (clahe | equalize), (blur | downscale) and (embos | sharpen).
        Results are written via ``self.save_augmented_images``.
        """
        # All augmenters are built once up front; each fires unconditionally
        # (p=1) when applied.
        aug_dict = {
            'rotate': A.Compose(
                [A.VerticalFlip(p=1), A.RandomRotate90(p=1)]),
            'elastic': A.ElasticTransform(p=1,
                                          alpha=400,
                                          sigma=20,
                                          alpha_affine=0.5,
                                          border_mode=cv2.BORDER_REPLICATE,
                                          interpolation=cv2.INTER_LINEAR),
            'clahe': A.CLAHE(clip_limit=(4.0, 16.0),
                             tile_grid_size=(4, 4),
                             always_apply=False,
                             p=1),
            'blur': A.Blur(blur_limit=(5, 8), always_apply=False, p=1),
            'downscale': A.Downscale(scale_min=0.25,
                                     scale_max=0.4,
                                     interpolation=0,
                                     always_apply=False,
                                     p=1),
            'equalize': A.Equalize(mode='cv',
                                   by_channels=True,
                                   mask=None,
                                   always_apply=False,
                                   p=1),
            'noise': A.GaussNoise(var_limit=(50, 100),
                                  mean=0,
                                  always_apply=False,
                                  p=1),
            'saturate': A.HueSaturationValue(hue_shift_limit=30,
                                             sat_shift_limit=30,
                                             val_shift_limit=30,
                                             p=1),
            'embos': A.IAAEmboss(alpha=(0.5, 0.8),
                                 strength=(0.5, 0.8),
                                 always_apply=False,
                                 p=1),
            'sharpen': A.IAASharpen(alpha=(0.5, 0.8),
                                    lightness=(0.8, 1),
                                    always_apply=False,
                                    p=1),
            'solarize': A.Solarize(threshold=(100, 200),
                                   always_apply=False,
                                   p=1),
        }
        always_applied = ['rotate', 'elastic', 'noise', 'saturate',
                          'solarize']
        # Each pair draws its own random bit, for extra randomness between
        # the three choices.
        random_pairs = [('clahe', 'equalize'), ('blur', 'downscale'),
                        ('embos', 'sharpen')]

        _, _, files = next(os.walk(self.img_dir_path))
        for counter, filename in enumerate(files, start=1):
            image = imageio.imread(self.img_dir_path + '/' + filename)
            label = np.asarray(
                Image.open(self.label_dir_path + '/' + filename))

            def apply_and_save(name):
                # Run one named augmentation on the pair and persist it.
                self.save_augmented_images(
                    aug_name=name,
                    augmented_items=aug_dict[name](image=image, mask=label),
                    image_name=counter)

            for aug_name in always_applied:
                apply_and_save(aug_name)
            for first, second in random_pairs:
                apply_and_save(first if random.getrandbits(1) else second)
コード例 #20
0
def get_train_transforms_mmdetection(input_size,
                                     use_crop=False,
                                     use_no_color_aug=False,
                                     use_center_crop=False,
                                     center_crop_ratio=0.9,
                                     use_gray=False):
    """Build the mmdetection-style training augmentation pipeline.

    Args:
        input_size: target size; either an int (square crop) or an
            indexable ``(height, width)`` pair.
        use_crop, use_no_color_aug, use_center_crop, center_crop_ratio:
            accepted for signature compatibility; not used in this body.
        use_gray: when True, ``ToGray`` fires with p=1 inside its OneOf
            group instead of the rare p=0.05.

    Returns:
        An ``albumentations.Compose`` ending in ``Normalize`` + ``ToTensorV2``.
    """
    if isinstance(input_size, int):
        # BUGFIX: the original did ``(input_size[0], input_size[1])`` here,
        # but an int is not subscriptable, so every scalar input_size raised
        # TypeError. Expand the scalar into a square (height, width) pair.
        input_size = (input_size, input_size)
    return al.Compose([
        # Spatial: random crop (half the time) then hard resize + flip.
        al.RandomResizedCrop(height=input_size[0],
                             width=input_size[1],
                             scale=(0.4, 1.0),
                             interpolation=0,
                             p=0.5),
        al.Resize(input_size[0], input_size[1], p=1.0),
        al.HorizontalFlip(p=0.5),
        # Rare geometric distortions.
        al.OneOf([
            al.ShiftScaleRotate(border_mode=0,
                                shift_limit=(-0.2, 0.2),
                                scale_limit=(-0.2, 0.2),
                                rotate_limit=(-20, 20)),
            al.OpticalDistortion(border_mode=0,
                                 distort_limit=[-0.5, 0.5],
                                 shift_limit=[-0.5, 0.5]),
            al.GridDistortion(
                num_steps=5, distort_limit=[-0., 0.3], border_mode=0),
            al.ElasticTransform(border_mode=0),
            al.IAAPerspective(),
            al.RandomGridShuffle()
        ],
                 p=0.1),
        al.Rotate(limit=(-25, 25), border_mode=0, p=0.1),
        # Rare color jitter.
        al.OneOf([
            al.RandomBrightnessContrast(brightness_limit=(-0.2, 0.2),
                                        contrast_limit=(-0.2, 0.2)),
            al.HueSaturationValue(hue_shift_limit=(-20, 20),
                                  sat_shift_limit=(-30, 30),
                                  val_shift_limit=(-20, 20)),
            al.RandomGamma(gamma_limit=(30, 150)),
            al.RGBShift(),
            al.CLAHE(clip_limit=(1, 15)),
            al.ChannelShuffle(),
            al.InvertImg(),
        ],
                 p=0.1),
        # Weather effects, each applied independently with p=0.05.
        al.RandomSnow(p=0.05),
        al.RandomRain(p=0.05),
        al.RandomFog(p=0.05),
        al.RandomSunFlare(num_flare_circles_lower=1,
                          num_flare_circles_upper=2,
                          src_radius=110,
                          p=0.05),
        al.RandomShadow(p=0.05),
        # Noise sources, each independent.
        al.GaussNoise(var_limit=(10, 20), p=0.05),
        al.ISONoise(color_shift=(0, 15), p=0.05),
        al.MultiplicativeNoise(p=0.05),
        # Palette / tone changes; ToGray is forced when use_gray is set.
        al.OneOf([
            al.ToGray(p=1. if use_gray else 0.05),
            al.ToSepia(p=0.05),
            al.Solarize(p=0.05),
            al.Equalize(p=0.05),
            al.Posterize(p=0.05),
            al.FancyPCA(p=0.05),
        ],
                 p=0.05),
        # Blurs.
        al.OneOf([
            al.MotionBlur(blur_limit=(3, 7)),
            al.Blur(blur_limit=(3, 7)),
            al.MedianBlur(blur_limit=3),
            al.GaussianBlur(blur_limit=3),
        ],
                 p=0.05),
        # Occlusion / dropout.
        al.CoarseDropout(p=0.05),
        al.Cutout(num_holes=30,
                  max_h_size=37,
                  max_w_size=37,
                  fill_value=0,
                  p=0.05),
        al.GridDropout(p=0.05),
        al.ChannelDropout(p=0.05),
        # Quality degradation.
        al.Downscale(scale_min=0.5, scale_max=0.9, p=0.1),
        al.ImageCompression(quality_lower=60, p=0.2),
        al.Normalize(),
        ToTensorV2()
    ])
コード例 #21
0
def get_train_transforms_atopy(input_size,
                               use_crop=False,
                               use_no_color_aug=False):
    """Build the training augmentation pipeline for the atopy dataset.

    Args:
        input_size: final square side length in pixels.
        use_crop: when True, upsample to 1.2x then take a random-sized crop
            back to ``input_size``; otherwise just resize.
        use_no_color_aug: accepted for signature compatibility; not used in
            this body.

    Returns:
        An ``albumentations.Compose`` ending in ``Normalize`` + ``ToTensorV2``.
    """
    if use_crop:
        resize = [
            al.Resize(int(input_size * 1.2), int(input_size * 1.2)),
            al.RandomSizedCrop(min_max_height=(int(input_size * 0.6),
                                               int(input_size * 1.2)),
                               height=input_size,
                               width=input_size)
        ]
    else:
        resize = [al.Resize(input_size, input_size)]
    return al.Compose(resize + [
        al.Flip(p=0.5),
        # Rotation: either a 90-degree multiple or a free rotation.
        al.OneOf([
            al.RandomRotate90(),
            al.Rotate(limit=180),
        ], p=0.5),
        # Geometric distortions.
        al.OneOf([
            al.ShiftScaleRotate(),
            al.OpticalDistortion(),
            al.GridDistortion(),
            al.ElasticTransform(),
        ],
                 p=0.3),
        al.RandomGridShuffle(p=0.05),
        # Color / channel manipulations.
        al.OneOf([
            al.RandomGamma(),
            al.HueSaturationValue(),
            al.RGBShift(),
            al.CLAHE(),
            al.ChannelShuffle(),
            al.InvertImg(),
        ],
                 p=0.1),
        # Weather effects, each applied independently.
        al.RandomSnow(p=0.05),
        al.RandomRain(p=0.05),
        al.RandomFog(p=0.05),
        al.RandomSunFlare(p=0.05),
        al.RandomShadow(p=0.05),
        al.RandomBrightnessContrast(p=0.05),
        # Noise sources, each independent.
        al.GaussNoise(p=0.2),
        al.ISONoise(p=0.2),
        al.MultiplicativeNoise(p=0.2),
        # Palette / tone changes.
        al.ToGray(p=0.05),
        al.ToSepia(p=0.05),
        al.Solarize(p=0.05),
        al.Equalize(p=0.05),
        al.Posterize(p=0.05),
        al.FancyPCA(p=0.05),
        # Blurs.
        al.OneOf([
            al.MotionBlur(blur_limit=3),
            al.Blur(blur_limit=3),
            al.MedianBlur(blur_limit=3),
            al.GaussianBlur(blur_limit=3),
        ],
                 p=0.05),
        # Occlusion / dropout.
        al.CoarseDropout(p=0.05),
        al.Cutout(p=0.05),
        al.GridDropout(p=0.05),
        al.ChannelDropout(p=0.05),
        # Quality degradation.
        al.Downscale(p=0.1),
        al.ImageCompression(quality_lower=60, p=0.2),
        al.Normalize(),
        ToTensorV2()
    ])
コード例 #22
0
ファイル: operations.py プロジェクト: fortotal/autoalbument
 def as_transform(self, value, p):
     """Return a Solarize transform using ``value`` as the threshold and
     ``p`` as the application probability."""
     return A.Solarize(p=p, threshold=value)
コード例 #23
0
# Dataset location — presumably CIFAR-100 resized/processed images; confirm
# against the loader that consumes it.
traindir = '/home/sunwon/cifar100'
"""# Set of Transformations"""

# Training pipeline: random 50x50 crop, SimCLR-style color jitter (p=0.8),
# flips and occasional tone changes, then per-channel normalization with
# CIFAR statistics.
train_transform = A.Compose([
    A.RandomResizedCrop(always_apply=False,
                        p=1,
                        height=50,
                        width=50,
                        scale=(0.08, 1.0),
                        ratio=(0.75, 1.33333333),
                        interpolation=2),
    # Positional args: brightness, contrast, saturation, hue, always_apply, p.
    A.ColorJitter(0.4, 0.4, 0.4, 0.1, False, 0.8),
    A.HorizontalFlip(p=0.5),
    A.ToGray(p=0.2),
    A.Solarize(p=0.2),
    A.CLAHE(p=0.2),
    A.RandomBrightness(p=0.2),
    A.Normalize(mean=(0.4914, 0.4822, 0.4465),
                std=(0.2471, 0.2435, 0.2616),
                max_pixel_value=255.0,
                always_apply=True,
                p=1.0),
    ToTensorV2()
])

test_transform = A.Compose([
    A.Normalize(mean=(0.4914, 0.4822, 0.4465),
                std=(0.2471, 0.2435, 0.2616),
                max_pixel_value=255.0,
                always_apply=True,