Example #1
def apply_random_corruption(img, test=False):
    severity = np.random.choice(6)
    if severity == 0:
        return img
    else:
        img = np.array(img).astype(np.uint8)
        if test:
            corruption = np.random.choice(ic.get_corruption_names())
        else:
            corruption = np.random.choice(ic.get_corruption_names('validation'))
        corrupted = ic.corrupt(img, severity=severity, corruption_name=corruption)
        corrupted = Image.fromarray(corrupted)
        return corrupted
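A minimal usage sketch (not part of the original snippet): apply_random_corruption can be dropped into a torchvision pipeline via transforms.Lambda, assuming imagecorruptions is imported as ic and the inputs are PIL images.

from torchvision import transforms

train_transform = transforms.Compose([
    transforms.RandomResizedCrop(224),
    transforms.Lambda(apply_random_corruption),  # corrupt while the image is still a PIL image
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])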
Example #2
 def _train(self):
     res = super(RayModel, self)._train()
     if self.config['state'] == 'search' and self.config['robustness']:
         # evaluate robustness on the validation subset of corruptions
         corruptions = ic.get_corruption_names('validation')
         robustness_metric = self.trainer._eval_robustness(self._iteration, corruptions=corruptions)
         res['robustness'] = robustness_metric

     return res
Example #3
def process_image(file_path: Path, max_severity: int, output_dir: Path) -> None:
    image = load_rgb(file_path)
    for corruption in get_corruption_names():
        for severity in range(max_severity):
            corrupted = corrupt(image, corruption_name=corruption, severity=severity + 1)
            corrupted = bgr2rgb(corrupted)
            cv2.imwrite(
                str(output_dir.joinpath(f"{file_path.stem}_{corruption}_{severity + 1}{file_path.suffix}")), corrupted
            )
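A possible driver loop for process_image, shown here only for illustration; it assumes the load_rgb and bgr2rgb helpers used above are defined and that the inputs are .jpg files.

from pathlib import Path

input_dir = Path("images")        # hypothetical input folder
output_dir = Path("corrupted")    # hypothetical output folder
output_dir.mkdir(parents=True, exist_ok=True)

for file_path in sorted(input_dir.glob("*.jpg")):
    process_image(file_path, max_severity=5, output_dir=output_dir)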
Example #4
def corrup(img):
    copyimg = img.copy()
    imglist = []
    for corruption in get_corruption_names():
        for severity in range(3):
            corrupted = corrupt(copyimg,
                                corruption_name=corruption,
                                severity=severity + 1)
            imglist.append(corrupted)
    return imglist
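A hedged follow-up sketch showing one way to inspect the list returned by corrup; it assumes matplotlib is available and img is an RGB uint8 array.

import matplotlib.pyplot as plt

def show_corruptions(img, cols=5):
    imgs = corrup(img)
    rows = (len(imgs) + cols - 1) // cols
    fig, axes = plt.subplots(rows, cols, figsize=(3 * cols, 3 * rows))
    # hide all axes first, then draw the corrupted variants in order
    for ax in axes.ravel():
        ax.axis('off')
    for ax, im in zip(axes.ravel(), imgs):
        ax.imshow(im)
    plt.show()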
Example #5
 def _eval_robustness(self, iteration, corruptions=None, severities=None):
     """Evaluates corruption robustness of the model."""
     if (iteration - 1) % self.hparams.perturbation_interval == 0:
         if corruptions is None:
             corruptions = ic.get_corruption_names('validation')
         if severities is None:
             severities = [1, 2, 3, 4, 5]
         self.mPC = self.eval_child_model(self.meval, self.data_loader, 'val',
                                          robustness=True, corruptions=corruptions,
                                          severities=severities)
     # self.mPC is cached from the last evaluation interval and re-logged otherwise
     tf.logging.info('Validation Robustness: {}'.format(self.mPC))
     return self.mPC
Example #6
    def test_by_comparison_with_imagecorruptions(self):
        subset_names = ["common", "validation", "all"]
        for subset in subset_names:
            with self.subTest(subset=subset):
                func_names, funcs = iaa.imgcorruptlike.get_corruption_names(
                    subset)
                func_names_exp = imagecorruptions.get_corruption_names(subset)

                assert func_names == func_names_exp
                for func_name, func in zip(func_names, funcs):
                    assert getattr(iaa.imgcorruptlike, "apply_%s" %
                                   (func_name, )) is func
Example #7
def main():
    args = parser.parse_args()
    
    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    elif args.resume:
        if os.path.isfile(args.resume):
            print("=> creating model '{}'".format(args.arch))
            model = models.__dict__[args.arch]()
            print("=> loading checkpoint '{}'".format(args.resume))
            # Map model to be loaded to specified single gpu.
            loc = 'cuda:{}'.format(args.gpu)
            checkpoint = torch.load(args.resume, map_location=loc)

            model.load_state_dict(checkpoint['state_dict'])
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
            exit()
        
    else:
        print("=> neither pretrained nor checkpoint model selected. Exiting...")
        exit()
        
    model = model.cuda(args.gpu)

            
    corruptions = ic.get_corruption_names()
    severities = [0,1,2,3,4,5]
    accuracies = {}
    for corruption in corruptions:
        accuracies[corruption] = {}
        for severity in severities:
            if severity == 0:
                if corruption == corruptions[0]:
                    test_loader = get_test_loader(args, corruption, severity)
                    print('Testing clean')
                    acc = validate(test_loader, model, args)
                    accuracies[corruption][severity] = torch.squeeze(acc.cpu()).item()
                else:
                    accuracies[corruption][severity] = accuracies[corruptions[0]][severity]
            else:
                test_loader = get_test_loader(args, corruption, severity)
                print('Testing %s:%d'%(corruption, severity))
                acc = validate(test_loader, model, args)
                accuracies[corruption][severity] = torch.squeeze(acc.cpu()).item()
                
    if args.resume:
        epoch = checkpoint['epoch']
    elif args.pretrained:
        epoch = 0
                
    pickle.dump(accuracies, open("robustness_epoch_{}.pkl".format(epoch), "wb"))
    
    
def validate(val_loader, model, args):
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(val_loader),
        [batch_time, losses, top1, top5],
        prefix='Test: ')

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            images = images.cuda(args.gpu, non_blocking=True)
            target = target.cuda(args.gpu, non_blocking=True)

            # compute output
            output = model(images)

            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

        # TODO: this should also be done with the ProgressMeter
        print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
              .format(top1=top1, top5=top5))

    return top1.avg

def get_test_loader(args, corruption, severity):
    if severity == 0:
        # clean validation images: resize and center-crop as usual
        testdir = os.path.join(args.in_clean, 'val')
        loader = torch.utils.data.DataLoader(
            datasets.ImageFolder(testdir, transforms.Compose([
                transforms.Resize(256),
                transforms.CenterCrop(224),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225]),
            ])),
            batch_size=args.batch_size, shuffle=False,
            num_workers=args.workers, pin_memory=True)
    else:
        # pre-corrupted images are loaded as stored, without resizing or cropping
        testdir = os.path.join(args.data, corruption, str(severity))
        loader = torch.utils.data.DataLoader(
            datasets.ImageFolder(testdir, transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225]),
            ])),
            batch_size=args.batch_size, shuffle=False,
            num_workers=args.workers, pin_memory=True)
    return loader

def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res

class AverageMeter(object):
    """Computes and stores the average and current value"""
    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return fmtstr.format(**self.__dict__)


class ProgressMeter(object):
    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        entries = [self.prefix + self.batch_fmtstr.format(batch)]
        entries += [str(meter) for meter in self.meters]
        print('\t'.join(entries))

    def _get_batch_fmtstr(self, num_batches):
        num_digits = len(str(num_batches // 1))
        fmt = '{:' + str(num_digits) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'

if __name__ == "__main__":
    main()
Example #8
def main():
    best_acc1 = 0
    args = parser.parse_args()
    assert args.batch_size % args.effective_bs == 0, "Effective batch size must be a divisor of batch_size"
    
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))
        
    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()
        
    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
        
    if args.train:
        writer = SummaryWriter()
        optimizer = torch.optim.Adam(model.parameters(), args.lr)
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)
        
    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                #best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)            
            model.load_state_dict(checkpoint['state_dict'])
            if args.train:
                try:
                    optimizer.load_state_dict(checkpoint['optimizer'])
                    print("=> loaded optimizer state from checkpoint")
                except:
                    print("=> optimizer state not found in checkpoint")
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
            
    cudnn.benchmark = True
    
    # Data loading code
    traindir = os.path.join(args.data, 'train')
    norm_params = {'mean':[0.485, 0.456, 0.406],
                  'std':[0.229, 0.224, 0.225]}
    normalize = transforms.Normalize(mean=norm_params['mean'],
                                     std=norm_params['std'])
    test_loader = get_test_loader(args, normalize)
    if args.evaluate == 'corrupted':
        corrupted_test_loader = get_test_loader(args, normalize, lambda img: apply_random_corruption(img, test=True))
    
    
    if args.train:
        if args.augment_train_data:
            # as augmodel will be applied before normalization, Normalize is omitted from this transform
            train_dataset = datasets.ImageFolder(
                traindir,
                transforms.Compose([
                    transforms.RandomResizedCrop(224),
                    transforms.ToTensor(),
            ]))
            if args.augmentations:
                ops = []
                for aug in args.augmentations:
                    ops.append(augmentations.__dict__[aug])
            else:
                ops = augmentations.standard_augmentations
            # initialize augmodel
            print('Using augmentations ' + str(ops))
            augmodel = AugModel(norm_params=norm_params, augmentations=ops, augmentation_mean=args.augmentation_mean, augmentation_std=args.augmentation_std, min_magnitude=args.min_magnitude, max_magnitude=args.max_magnitude)
            if args.resume and 'augmodel_state_dict' in checkpoint.keys():
                augmodel.load_state_dict(checkpoint['augmodel_state_dict'])
            if 'AdaptiveStyleTransfer' in args.augmentations:
                augmodel.augmentations[1].initStyles(args.style_subset, seed=args.seed)
            if 'StyleTransfer' in args.augmentations and args.style_subset is not None:
                op = augmodel.augmentations[1]
                assert str(op) == 'StyleTransfer'
                pbn = op._PainterByNumbers
                assert 0 < args.style_subset < len(pbn)
                if args.seed:
                    rng_state = torch.get_rng_state() # save the pseudo-random state
                    torch.manual_seed(args.seed) # set the seed for deterministic dataset splits
                pbn_split, _ = torch.utils.data.dataset.random_split(pbn, [args.style_subset, len(pbn) - args.style_subset])
                if args.seed:
                    torch.set_rng_state(rng_state) # reset the state for non-deterministic behaviour below
                op._PainterByNumbers = pbn_split
                op.resetStyleLoader(args.effective_bs)
            if args.gpu is not None:
                augmodel = augmodel.cuda(args.gpu)
                augmodel.augmentations[1].enc_to()
                augmodel.augmentations[1].dec_to()
        else:
            train_dataset = datasets.ImageFolder(
                traindir,
                transforms.Compose([
                    transforms.RandomResizedCrop(224),
                    transforms.ToTensor(),
                    normalize
            ]))
            augmodel = None

        if args.ho:
            ho_criterion = nn.CrossEntropyLoss().cuda(args.gpu)
            ho_optimizer = torch.optim.Adam([p for p in augmodel.parameters() if p.requires_grad], args.ho_lr)
            if args.resume and 'ho_optimizer' in checkpoint.keys():
                try:
                    ho_optimizer.load_state_dict(checkpoint['ho_optimizer'])
                    print("=> loaded optimizer state from checkpoint")
                except:
                    print("=> optimizer state not found in checkpoint")
                
            # train/val split
            train_size = int(len(train_dataset) * args.train_size)
            if args.seed:
                rng_state = torch.get_rng_state() # save the pseudo-random state
                torch.manual_seed(args.seed) # set the seed for deterministic dataset splits
            train_split, val_split = torch.utils.data.dataset.random_split(train_dataset, [train_size, len(train_dataset) - train_size])
            if args.seed:
                torch.set_rng_state(rng_state) # reset the state for non-deterministic behaviour below
            if args.validation_objective == 'clean':
                val_transform = transforms.Compose([
                        transforms.Resize(256),
                        transforms.CenterCrop(224),
                        transforms.ToTensor(),
                        normalize,
                    ])
            elif args.validation_objective == 'corrupted':
                val_transform = transforms.Compose([
                        transforms.Resize(256),
                        transforms.CenterCrop(224),
                        transforms.Lambda(apply_random_corruption),
                        transforms.ToTensor(),
                        normalize,
                    ])
            # as the underlying dataset of both splits is the same, this is the only way of having separate transforms for train and val split
            val_dataset = datasets.ImageFolder(traindir, transform=val_transform)
            val_split.dataset = val_dataset
            
            train_loader = torch.utils.data.DataLoader(
                train_split, batch_size=args.batch_size, shuffle=True,
                num_workers=args.workers, pin_memory=True, drop_last=True)

            val_loader = InfiniteDataLoader(
                val_split, batch_size=args.batch_size, shuffle=True, 
                num_workers=args.workers, pin_memory=True, drop_last=True)   
        else:
            if args.path_to_stylized and not args.augment_train_data:
                stylized_imagenet = datasets.ImageFolder(root=traindir, loader=stylized_loader, transform=transforms.Compose([transforms.ToTensor(), normalize]))
                train_dataset = torch.utils.data.ConcatDataset([train_dataset, stylized_imagenet])
                
            train_loader = torch.utils.data.DataLoader(
                train_dataset, batch_size=args.batch_size, shuffle=True,
                num_workers=args.workers, pin_memory=True, drop_last=True)
            val_loader = None
            ho_criterion = None
            ho_optimizer = None
        
        # training
        for epoch in range(args.start_epoch, args.epochs):
            if args.decrease_temperature is not None and (epoch - args.start_epoch) % args.decrease_temperature == 0 and epoch != args.start_epoch:
                augmodel.augmentations[1].temperature /= 2
            if args.increasing_alpha is not None and (epoch - args.start_epoch) % args.increasing_alpha == 0:
                op = augmodel.augmentations[1]
                assert str(op) == 'StyleTransfer'
                current_alpha = op.mu_mag
                
                ckpt = {
                    'epoch': epoch,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer' : optimizer.state_dict(),
                }
                if args.ho:
                    ckpt['augmodel_state_dict'] = augmodel.state_dict()
                    ckpt['ho_optimizer'] = ho_optimizer.state_dict()
                save_checkpoint(ckpt, is_best=False, filename='checkpoint_alpha_%1.3f.pth.tar'%(current_alpha.item()))
                
                updated_alpha = current_alpha + 0.1
                op.mu_mag = updated_alpha
                print("=> alpha=%1.2f"%(op.mu_mag.item()))
            train(train_loader, val_loader, model, augmodel, criterion, ho_criterion, optimizer, ho_optimizer, epoch, args, writer)
            is_best = False
            # evaluate on validation set
            if epoch % args.print_freq == 0:
                acc1 = validate(test_loader, model, criterion, args)
                writer.add_scalar('Metrics/test_acc', acc1, epoch)
                if args.evaluate == 'corrupted':
                    mpc = validate(corrupted_test_loader, model, criterion, args)
                    writer.add_scalar('Metrics/test_mpc', mpc, epoch)
                
                # remember best acc@1 and save checkpoint
                is_best = acc1 > best_acc1
                best_acc1 = max(acc1, best_acc1)
            
            ckpt = {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer' : optimizer.state_dict(),
            }
            
            if args.ho:
                ckpt['augmodel_state_dict'] = augmodel.state_dict()
                ckpt['ho_optimizer'] = ho_optimizer.state_dict()

            save_checkpoint(ckpt, is_best)
            
    if args.evaluate == 'clean':
        validate(test_loader, model, criterion, args)
    elif args.evaluate == 'corrupted':
        corruptions = ic.get_corruption_names('all')
        severities = [0,1,2,3,4,5]
        accuracies = {}
        for corruption in corruptions:
            accuracies[corruption] = {}
            for severity in severities:
                if severity == 0:
                    print('Testing clean')
                    acc = validate(test_loader, model, criterion, args)
                    accuracies[corruption][severity] = torch.squeeze(acc.cpu()).item()
                else:
                    print('Testing %s:%d'%(corruption, severity))
                    corrupted_loader = get_test_loader(args, normalize, lambda x: Image.fromarray(ic.corrupt(np.array(x, dtype=np.uint8), corruption_name=corruption, severity=severity)))
                    acc = validate(corrupted_loader, model, criterion, args)
                    accuracies[corruption][severity] = torch.squeeze(acc.cpu()).item()
        if args.train:
            e = args.epochs
        elif args.resume:
            e = args.start_epoch
        pickle.dump(accuracies, open("robustness_epoch_{}.pkl".format(e), "wb"))
Example #9
from imagecorruptions import corrupt, get_corruption_names
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import time

image = np.asarray(Image.open('test_image.jpg'))
#image = np.ones((427, 640, 3), dtype=np.uint8)

# corrupted_image = corrupt(img, corruption_name='gaussian_blur', severity=1)

for corruption in get_corruption_names('blur'):
    tic = time.time()
    for severity in range(5):
        corrupted = corrupt(image,
                            corruption_name=corruption,
                            severity=severity + 1)
        plt.imshow(corrupted)
        plt.show()
    print(corruption, time.time() - tic)
Example #10
            for s in np.arange(1, 6):
                corrupted_img = corrupt(img, severity=s, corruption_number=c)
                try:
                    rmse[s - 1][c] = np.sqrt(np.mean((img - corrupted_img)**2))
                except:
                    e = sys.exc_info()[0]
                    print(
                        "Error occured in file %s with index %d for corruption %d severity %d"
                        % (img_info['filename'], idx, c, s))
                    print(e)
                    exit()
        return rmse


if __name__ == "__main__":
    corruption_names = get_corruption_names('all')

    voc_test_config = mmcv.Config.fromfile('rmse_configs/voc07test.py')
    # hack the config to obtain a VocRmse dataset and add CocoRmse to the available datasets
    voc_rmse_config = voc_test_config.data.test
    voc_rmse_config.type = 'VOCRmse'
    datasets.VOCRmse = VOCRmse

    # obtain the dataloader
    voc_data = obj_from_dict(voc_rmse_config, datasets, dict(test_mode=True))
    dataloader_voc = datasets.build_dataloader(voc_data,
                                               imgs_per_gpu=1,
                                               workers_per_gpu=32,
                                               num_gpus=1,
                                               dist=False,
                                               shuffle=False)
Example #11
def main_worker(gpu, ngpus_per_node, args):
    global best_acc1
    args.gpu = gpu

    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
            dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                    world_size=args.world_size, rank=args.rank)

    # create model
    print("=> creating model")
    model, preprocess = clip.load('RN50x4.pt', device='cpu', jit=False)

    if not torch.cuda.is_available():
        print('using CPU, this will be slow')
    elif args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs we have
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        model = torch.nn.DataParallel(model).cuda()

    # encode labels
    device = "cuda" if torch.cuda.is_available() else "cpu"
    with open('labels/imagenet-simple-labels.json') as f:
        labels = json.load(f)
    labels = ['a picture of a {}.'.format(l) for l in labels]

    text = clip.tokenize(labels).to(device)
    with torch.no_grad():
        text_features = get_model(model).encode_text(text)
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)

    cudnn.benchmark = True

    # Data loading code
    valdir = os.path.join(args.data, 'val')

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, preprocess),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    acc = validate(val_loader, model, text_features, args)
    print('val acc')
    print(acc.item())

    # eval corruptions
    print("corruptions acc")
    avg = 0
    output_str = ''
    for c in imagecorruptions.get_corruption_names():
        results = [c]
        for s in range(1, 6):
            valdir = os.path.join(args.data, 'corruptions', c, str(s))
            val_loader = torch.utils.data.DataLoader(
                datasets.ImageFolder(valdir, preprocess),
                batch_size=args.batch_size, shuffle=False,
                num_workers=args.workers, pin_memory=True)

            acc = validate(val_loader, model, text_features, args)
            results.append(acc.item())
            avg += acc.item()
        output_str += ','.join(map(lambda x: str(x), results)) + '\n'
    print(output_str)
    print("average corruption acc", avg / 75)
Example #12
from imagecorruptions import corrupt, get_corruption_names
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import time
# image = np.asarray(Image.open('demo.jpg'))
image = np.ones((427, 640, 3), dtype=np.uint8)

# corrupted_image = corrupt(img, corruption_name='gaussian_blur', severity=1)

for corruption in get_corruption_names('all'):
    tic = time.time()
    for severity in range(5):
        corrupted = corrupt(image,
                            corruption_name=corruption,
                            severity=severity + 1)
        plt.imshow(corrupted)
        plt.show()
    print(corruption, time.time() - tic)
Example #13
def main():
    best_acc1 = 0
    args = parser.parse_args()
    assert args.batch_size % args.effective_bs == 0, "Effective batch size must be a divisor of batch_size"

    if len(args.gpu) < 2:
        print("Two GPU's are needed for training. Exiting...")
        exit()
    else:
        print("Use GPU: {} for training".format(args.gpu[:2]))
        gpu0 = args.gpu[0]
        gpu1 = args.gpu[1]

    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

    model = model.cuda(gpu0)

    if args.train:
        writer = SummaryWriter()
        optimizer = torch.optim.SGD(model.parameters(), args.lr)
    criterion = nn.CrossEntropyLoss().cuda(gpu0)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            # Map model to be loaded to specified single gpu.
            loc = 'cuda:{}'.format(gpu0)
            checkpoint = torch.load(args.resume, map_location=loc)

            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            best_acc1 = best_acc1.to(gpu0)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer = torch.optim.SGD(model.parameters(),
                                        args.lr,
                                        momentum=args.momentum,
                                        weight_decay=args.weight_decay)
            if args.train:
                try:
                    optimizer.load_state_dict(checkpoint['optimizer'])
                    print("=> loaded optimizer state from checkpoint")
                except:
                    print("=> optimizer state not found in checkpoint")
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(args.data, 'train')
    norm_params = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}
    normalize = transforms.Normalize(mean=norm_params['mean'],
                                     std=norm_params['std'])
    NORM = kornia.color.Normalize(mean=torch.tensor(norm_params['mean'],
                                                    dtype=torch.float32),
                                  std=torch.tensor(norm_params['std'],
                                                   dtype=torch.float32))
    test_loader = get_test_loader(args, normalize)
    if args.evaluate == 'corrupted':
        corrupted_test_loader = get_test_loader(
            args, normalize,
            lambda img: apply_random_corruption(img, test=True))

    if args.train:
        # as augmodel will be applied before normalization,
        AST = AdaptiveStyleTransfer(temperature=torch.tensor(0.1),
                                    mean=0.,
                                    logits=torch.zeros(args.style_subset,
                                                       dtype=torch.float32,
                                                       requires_grad=True))
        AST.cuda(gpu1)
        AST.initStyles(args.style_subset, seed=args.seed)

        def styletransfer(img):
            if np.random.uniform() < 0.5:
                img = AST(img)
            return NORM(img)

        train_dataset = datasets.ImageFolder(
            traindir,
            transforms.Compose([
                transforms.RandomResizedCrop(224),
                transforms.ToTensor(),
            ]))

        train_loader = torch.utils.data.DataLoader(train_dataset,
                                                   batch_size=args.batch_size,
                                                   shuffle=True,
                                                   num_workers=args.workers,
                                                   pin_memory=True,
                                                   drop_last=True)

        # training
        for epoch in range(args.start_epoch, args.epochs):
            if args.increasing_alpha is not None and (
                    epoch - args.start_epoch) % args.increasing_alpha == 0:
                current_alpha = AST.mu_mag

                ckpt = {
                    'epoch': epoch,
                    'arch': args.arch,
                    'state_dict': model.state_dict(),
                    'best_acc1': best_acc1,
                    'optimizer': optimizer.state_dict(),
                }
                save_checkpoint(ckpt,
                                is_best=False,
                                filename='checkpoint_alpha_%1.3f.pth.tar' %
                                (current_alpha.item()))

                updated_alpha = current_alpha + 0.1
                AST.mu_mag = updated_alpha
                print('=> Alpha={}'.format(AST.mu_mag.item()))
            train(train_loader, model, styletransfer, criterion, optimizer,
                  epoch, args, writer)
            is_best = False
            # evaluate on validation set
            if epoch % args.print_freq == 0:
                acc1 = validate(test_loader, model, criterion, args)
                writer.add_scalar('Metrics/test_acc', acc1, epoch)
                if args.evaluate == 'corrupted':
                    mpc = validate(corrupted_test_loader, model, criterion,
                                   args)
                    writer.add_scalar('Metrics/test_mpc', mpc, epoch)

                # remember best acc@1 and save checkpoint
                is_best = acc1 > best_acc1
                best_acc1 = max(acc1, best_acc1)

            ckpt = {
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer': optimizer.state_dict(),
            }

            save_checkpoint(ckpt, is_best)
            writer.add_scalar('AST/alpha',
                              AST.mu_mag.cpu().detach().numpy(), epoch)

    if args.evaluate == 'clean':
        validate(test_loader, model, criterion, args)
    elif args.evaluate == 'corrupted':
        corruptions = ic.get_corruption_names('all')
        severities = [0, 1, 2, 3, 4, 5]
        accuracies = {}
        for corruption in corruptions:
            accuracies[corruption] = {}
            for severity in severities:
                if severity == 0:
                    print('Testing clean')
                    acc = validate(test_loader, model, criterion, args)
                    accuracies[corruption][severity] = torch.squeeze(
                        acc.cpu()).item()
                else:
                    print('Testing %s:%d' % (corruption, severity))
                    corrupted_loader = get_test_loader(
                        args, normalize, lambda x: Image.fromarray(
                            ic.corrupt(np.array(x, dtype=np.uint8),
                                       corruption_name=corruption,
                                       severity=severity)))
                    acc = validate(corrupted_loader, model, criterion, args)
                    accuracies[corruption][severity] = torch.squeeze(
                        acc.cpu()).item()
        if args.train:
            e = args.epochs
        elif args.resume:
            e = args.start_epoch
        pickle.dump(accuracies, open("robustness_epoch_{}.pkl".format(e),
                                     "wb"))
Example #14
    def eval_child_model(self, model, data_loader, mode, robustness=False, corruptions=None, severities=None):
        """Evaluate the child model.

        Args:
          model: image model that will be evaluated.
          data_loader: dataset object to extract eval data from.
          mode: whether the model is evaluated on the 'val' or 'test' split.
          robustness: if True, evaluate accuracy under image corruptions.
          corruptions: corruption names to evaluate; defaults to all corruption names.
          severities: severities to evaluate; defaults to [0, 1, 2, 3, 4, 5], where 0 means clean.

        Returns:
          Accuracy of the model on the specified dataset, or a dict of
          per-corruption, per-severity accuracies when robustness is True
          and mode is 'test'.
        """
        tf.logging.info('Evaluating child model in mode {}'.format(mode))
        while True:
            try:
                if mode == 'val':
                    loader = self.data_loader.dataloader_val
                elif mode == 'test':
                    loader = self.data_loader.dataloader_test
                else:
                    raise ValueError('Not valid eval mode')
                tf.logging.info('model.batch_size is {}'.format(model.batch_size))
                if robustness:
                    if corruptions is None:
                        corruptions = ic.get_corruption_names()
                    if severities is None:
                        severities = [0,1,2,3,4,5]
                    if mode == 'val':
                        # if mode is 'val', apply a random corruption on a random severity to each image
                        correct = 0
                        count = 0
                        for images, labels in loader:
                            images = np.transpose(images.numpy(), [0,2,3,1])
                            labels = labels.numpy()
                            # produce one-hot target vector
                            labels = np.eye(model.num_classes)[labels]
                            # inverse normalization
                            means = data_loader.augmentation_transforms.MEANS[data_loader.hparams.dataset]
                            stds = data_loader.augmentation_transforms.STDS[data_loader.hparams.dataset]
                            images = ((images * stds) + means) * 255
                            # corrupt
                            images =  images.astype(np.uint8)
                            for j in range(len(images)):
                                s = np.random.choice(severities, 1)[0]
                                if s == 0:
                                    continue
                                c = np.random.choice(corruptions, 1)[0]
                                images[j] = ic.corrupt(images[j], corruption_name=c, severity=s)
                            # re-apply the dataset normalization (inverse of the de-normalization above)
                            images = ((images / 255.) - means) / stds
                            preds = self.session.run(
                                model.predictions,
                                feed_dict={
                                    model.images: images,
                                    model.labels: labels,
                                })
                            correct += np.sum(
                                np.equal(np.argmax(labels, 1), np.argmax(preds, 1)))
                            count += len(preds)
                        assert count == len(loader.dataset)
                        tf.logging.info('correct: {}, total: {}'.format(correct, count))
                        return correct / count
                    else:
                        # if mode is 'test', test all corruptions and severities on each image
                        accuracies = {c: {s: 0 for s in range(6)} for c in corruptions}
                        for c in corruptions:
                            for s in severities:
                                if (s == 0):
                                    if c == corruptions[0]:
                                        # iterate once over the clean dataset
                                        correct = 0
                                        count = 0
                                        progress_bar = tqdm.tqdm(loader)
                                        progress_bar.set_description('Clean')
                                        for images, labels in progress_bar:
                                            images = np.transpose(images.numpy(), [0,2,3,1])
                                            labels = labels.numpy()
                                            # produce one-hot target vector
                                            labels = np.eye(model.num_classes)[labels]
                                            preds = self.session.run(
                                                model.predictions,
                                                feed_dict={
                                                    model.images: images,
                                                    model.labels: labels,
                                                })
                                            correct += np.sum(
                                                np.equal(np.argmax(labels, 1), np.argmax(preds, 1)))
                                            count += len(preds)
                                        assert count == len(loader.dataset)
                                        accuracies[c][s] = correct / count
                                    else:
                                        # clean performance has been evaluated before
                                        # and will just be copied here for convenience
                                        accuracies[c][s] = accuracies[corruptions[0]][s]
                                else:
                                    correct = 0
                                    count = 0

                                    progress_bar = tqdm.tqdm(loader)
                                    progress_bar.set_description('Corruption: {}, Severity: {}'.format(c, s))
                                    for images, labels in progress_bar:
                                        images = np.transpose(images.numpy(), [0,2,3,1])
                                        labels = labels.numpy()
                                        # produce one-hot target vector
                                        labels = np.eye(model.num_classes)[labels]
                                        # inverse normalization
                                        means = data_loader.augmentation_transforms.MEANS[data_loader.hparams.dataset]
                                        stds = data_loader.augmentation_transforms.STDS[data_loader.hparams.dataset]
                                        images = ((images * stds) + means) * 255
                                        # corrupt
                                        images =  images.astype(np.uint8)
                                        for j in range(len(images)):
                                            images[j] = ic.corrupt(images[j], corruption_name=c, severity=s)
                                        # re-apply the dataset normalization (inverse of the de-normalization above)
                                        images = ((images / 255.) - means) / stds

                                        preds = self.session.run(
                                            model.predictions,
                                            feed_dict={
                                                model.images: images,
                                                model.labels: labels,
                                            })
                                        correct += np.sum(
                                            np.equal(np.argmax(labels, 1), np.argmax(preds, 1)))
                                        count += len(preds)
                                    assert count == len(loader.dataset)
                                    accuracies[c][s] = correct / count
                    return accuracies

                else:
                    correct = 0
                    count = 0
                    for images, labels in loader:
                        images = np.transpose(images.numpy(), [0,2,3,1])
                        labels = labels.numpy()
                        # produce one-hot target vector
                        labels = np.eye(model.num_classes)[labels]
                        preds = self.session.run(
                            model.predictions,
                            feed_dict={
                                model.images: images,
                                model.labels: labels,
                            })
                        correct += np.sum(
                            np.equal(np.argmax(labels, 1), np.argmax(preds, 1)))
                        count += len(preds)
                    assert count == len(loader.dataset)
                    tf.logging.info('correct: {}, total: {}'.format(correct, count))
                    accuracy = correct / count
                    tf.logging.info(
                        'Eval child model accuracy: {}'.format(accuracy))
                    # If epoch trained without raising the below errors, break
                    # from loop.
                    break
            except (tf.errors.AbortedError, tf.errors.UnavailableError) as e:
                tf.logging.info(
                    'Retryable error caught: {}.  Retrying.'.format(e))

        return accuracy
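One possible reduction, not from the original code, of the per-corruption accuracies dict returned in test mode to a single mean-performance-under-corruption number; the severity-0 entries hold the clean accuracy and are excluded from the average. The function name is illustrative.

def mean_corruption_accuracy(accuracies):
    # average accuracy over all corruptions and severities 1-5,
    # skipping the severity-0 (clean) entries duplicated per corruption
    vals = [acc
            for per_severity in accuracies.values()
            for severity, acc in per_severity.items()
            if severity != 0]
    return sum(vals) / len(vals)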