def eval_rob_accuracy_pgd(self,
                              data_loader,
                              eps,
                              alpha,
                              steps,
                              random_start=True,
                              restart_num=1,
                              norm='Linf',
                              save_path=None,
                              verbose=True,
                              save_pred=False):
        if norm == 'Linf':
            atk = PGD(self,
                      eps=eps,
                      alpha=alpha,
                      steps=steps,
                      random_start=random_start)
        elif norm == 'L2':
            atk = PGDL2(self,
                        eps=eps,
                        alpha=alpha,
                        steps=steps,
                        random_start=random_start)
        else:
            raise ValueError("{} is not a valid norm. [Options: 'Linf', 'L2']".format(norm))

        if restart_num > 1:
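            # Random restarts: MultiAttack runs the same PGD attack
            # `restart_num` times, keeping for each input the first
            # perturbation that fools the model.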
            atk = torchattacks.MultiAttack([atk] * restart_num)
        return atk.save(data_loader,
                        save_path,
                        verbose,
                        return_verbose=True,
                        save_pred=save_pred)[0]
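
# A minimal usage sketch, assuming `rob_model` is an instance of the wrapper
# class that defines eval_rob_accuracy_pgd and `test_loader` is a CIFAR-10
# test DataLoader (both names are illustrative only):
#
# rob_acc = rob_model.eval_rob_accuracy_pgd(test_loader,
#                                           eps=8 / 255,
#                                           alpha=2 / 255,
#                                           steps=10,
#                                           restart_num=3,
#                                           norm='Linf')
# print("Robust accuracy under PGD: {:.2f}%".format(rob_acc))
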
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", default="./config/craft/example.yaml")
    parser.add_argument("--gpu", default="0", type=str)

    args = parser.parse_args()
    config, _, config_name = load_config(args.config)

    train_transform = transforms.Compose([transforms.ToTensor()])
    train_data = CIFAR10(config["dataset_dir"], transform=train_transform, train=True)
    train_loader = DataLoader(train_data, **config["loader"])

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    gpu = torch.cuda.current_device()
    print("Set GPU to: {}".format(args.gpu))
    model = resnet18()
    model = model.cuda(gpu)
    adv_ckpt = torch.load(config["adv_model_path"], map_location="cuda:{}".format(gpu))
    model.load_state_dict(adv_ckpt)
    print(
        "Loaded training state from the checkpoint {}.".format(config["adv_model_path"])
    )
    if config["normalization_layer"] is not None:
        normalization_layer = NormalizeByChannelMeanStd(**config["normalization_layer"])
        normalization_layer = normalization_layer.cuda(gpu)
        print("Add a normalization layer: {} before model".format(normalization_layer))
        model = nn.Sequential(normalization_layer, model)

    pgd_config = config["pgd"]
    print("Set PGD attacker: {}.".format(pgd_config))
    max_pixel = pgd_config.pop("max_pixel")
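    # Convert eps/alpha from raw pixel units (e.g. 8 on a 0-255 scale) to the
    # [0, 1] input range the model expects.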
    for k, v in pgd_config.items():
        if k == "eps" or k == "alpha":
            pgd_config[k] = v / max_pixel
    attacker = PGD(model, **pgd_config)
    attacker.set_return_type("int")

    perturbed_img = torch.zeros((len(train_data), *config["size"]), dtype=torch.uint8)
    target = torch.zeros(len(train_data), dtype=torch.long)
    i = 0
    for item in tqdm(train_loader):
        # Adversarially perturb image. Note that torchattacks will automatically
        # move `img` and `target` to the gpu where the attacker.model is located.
        img = attacker(item["img"], item["target"])
        perturbed_img[i : i + len(img), :, :, :] = img.permute(0, 2, 3, 1).detach()
        target[i : i + len(item["target"])] = item["target"]
        i += img.shape[0]

    if not os.path.exists(config["adv_dataset_dir"]):
        os.makedirs(config["adv_dataset_dir"])
    adv_data_path = os.path.join(
        config["adv_dataset_dir"], "{}.npz".format(config_name)
    )
    np.savez(adv_data_path, data=perturbed_img.numpy(), targets=target.numpy())
    print("Save the adversarially perturbed dataset to {}".format(adv_data_path))
Example #3
    def __init__(self, model, eps, alpha, steps, beta, random_start=True):
        super().__init__("MART", model)
        self.record_keys = [
            "Loss", "BALoss", "WKLoss"
        ]  # Must be same as the items returned by self._do_iter
        self.atk = PGD(model, eps, alpha, steps, random_start)
        self.beta = beta
Example #4
    def __init__(self, model, eps, alpha, steps, beta, eta, inner_loss='ce'):
        super().__init__("TRADES", model)
        self.record_keys = [
            "Loss", "CELoss", "KLLoss"
        ]  # Must be same as the items returned by self._do_iter
        if inner_loss == 'ce':
            self.atk = PGD(model, eps, alpha, steps)
        elif inner_loss == 'kl':
            self.atk = TPGD(model, eps, alpha, steps)
        else:
            raise ValueError(inner_loss +
                             " is not a valid inner loss. [Options: 'ce', 'kl']")
        self.beta = beta
        self.eta = eta
Example #5
    def __init__(self, model, eps, alpha, steps, beta, m, loss='ce'):
        super().__init__("BAT", model)
        self.record_keys = [
            "Loss", "CELoss", "KLLoss"
        ]  # Must be same as the items returned by self._do_iter
        if loss == 'ce':
            self.atk = PGD(model, eps, alpha, steps)
        elif loss == 'kl':
            self.atk = TPGD(model, eps, alpha, steps)
        else:
            raise ValueError(loss +
                             " is not a valid loss. [Options: 'ce', 'kl']")
        self.beta = beta
        self.m = m
Example #6
    def __init__(self,
                 model,
                 eps,
                 alpha,
                 steps,
                 awp_gamma=0.01,
                 proxy_lr=0.01,
                 random_start=True):
        super().__init__("AwpAT", model)
        self.record_keys = [
            "CALoss"
        ]  # Must be same as the items returned by self._do_iter
        self.atk = PGD(model, eps, alpha, steps, random_start)

        self.proxy = copy.deepcopy(self.model.model)
        self.proxy_opt = torch.optim.SGD(self.proxy.parameters(), lr=proxy_lr)
        self.awp_adversary = AdvWeightPerturb(model=self.model.model,
                                              proxy=self.proxy,
                                              proxy_optim=self.proxy_opt,
                                              gamma=awp_gamma)
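
# A hedged construction sketch for the trainer wrappers above; the
# hyperparameter values are illustrative (beta=6.0 follows the TRADES/MART
# papers), not mandated by this code.
#
# trainer = TRADES(model, eps=8 / 255, alpha=2 / 255, steps=10,
#                  beta=6.0, eta=1.0, inner_loss='kl')
# trainer = AwpAT(model, eps=8 / 255, alpha=2 / 255, steps=10,
#                 awp_gamma=0.01, proxy_lr=0.01)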
Example #7
def forward(data_loader,
            model,
            criterion,
            epoch=0,
            training=True,
            optimizer=None,
            pgd=False,
            savepgd=False,
            pgditers=10,
            sd=0):
    if args.gpus and len(args.gpus) > 1:
        model = torch.nn.DataParallel(model, device_ids=args.gpus)
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    end = time.time()

    if pgd:
        # print("hippyty hoppity1")
        pgd_attack = PGD(model, eps=0.031, alpha=0.008, iters=pgditers)
        # pgd_attack = BPDA(model, step_size = 1., iters = pgditers, linf=False)

    for i, (inputs, target) in enumerate(data_loader):
        # measure data loading time

        data_time.update(time.time() - end)

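        # Optionally smooth inputs with additive Gaussian noise of std `sd`.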
        inputs = inputs + torch.randn_like(inputs) * sd

        if pgd:
            # print("hippyty hoppity2")
            inputs_adversarial = pgd_attack(inputs, target)

        if args.gpus is not None:
            target = target.cuda()

        with torch.no_grad():
            input_var = Variable(inputs.type(args.type))

            if pgd:
                input_adversarial_var = Variable(
                    inputs_adversarial.type(args.type))

        target_var = Variable(target)
        # compute output
        output = model(input_var)

        loss = criterion(output, target_var)

        if pgd:
            # print("hippyty hoppity3")
            output_adversarial = model(input_adversarial_var)

            loss_adversarial = criterion(output_adversarial, target_var)

            loss = 0.5 * (loss + loss_adversarial)

        if type(output) is list:
            output = output[0]

        # measure accuracy and record loss
        if pgd:
            # print("hippyty hoppity4")
            prec1, prec5 = accuracy(output_adversarial.data,
                                    target,
                                    topk=(1, 5))
        else:
            prec1, prec5 = accuracy(output.data, target, topk=(1, 5))

        losses.update(loss.item(), inputs.size(0))
        top1.update(prec1.item(), inputs.size(0))
        top5.update(prec5.item(), inputs.size(0))

        if training:
            # compute gradient and do SGD step
            optimizer.zero_grad()

            with torch.autograd.set_detect_anomaly(True):
                loss.backward()
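            # BinaryNet-style update: restore the real-valued weight copies
            # (p.org) before the optimizer step, then clamp them to [-1, 1]
            # after the step so binarization stays well-behaved.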
            for p in list(model.parameters()):
                if hasattr(p, 'org'):
                    p.data.copy_(p.org)
            optimizer.step()
            for p in list(model.parameters()):
                if hasattr(p, 'org'):
                    p.org.copy_(p.data.clamp_(-1, 1))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            logging.info('{phase} - Epoch: [{0}][{1}/{2}]\t'
                         'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                         'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                         'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                         'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                         'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                             epoch,
                             i,
                             len(data_loader),
                             phase='TRAINING' if training else 'EVALUATING',
                             batch_time=batch_time,
                             data_time=data_time,
                             loss=losses,
                             top1=top1,
                             top5=top5))

    return losses.avg, top1.avg, top5.avg
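
# A hedged usage sketch (assumption: `train_loader`/`val_loader`, `optimizer`,
# and the global `args` exist as elsewhere in this script):
#
# train_loss, train_prec1, train_prec5 = forward(
#     train_loader, model, criterion, epoch, training=True,
#     optimizer=optimizer, pgd=True, pgditers=10)
# val_loss, val_prec1, val_prec5 = forward(
#     val_loader, model, criterion, epoch, training=False, pgd=True)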
Example #8
def main():
    global args, best_prec1
    best_prec1 = 0
    args = parser.parse_args()
    
    ################### begin noise specific code #################################
    # Grab probability table data from the prob file.
    if args.prob_file:
        prob_data = sio.loadmat(args.prob_file)
        models.binarized_modules.prob_table = torch.tensor(prob_data['prob'].astype('float32')).cuda()
        models.binarized_modules.cum_sum = torch.cumsum(models.binarized_modules.prob_table, dim=-1).cuda()
        models.binarized_modules.levels = torch.tensor(prob_data['levels'].astype('float32')).cuda().squeeze()
        models.binarized_modules.step_size = 2

    
    models.binarized_modules.common_prob = args.common_prob
    models.binarized_modules.bitwise_prob = args.bitwise_prob
    models.binarized_modules.noise_inject = args.noise_inject
    models.binarized_modules.chunkwise = args.chunkwise
    
    # Quantization step size: ternary weights use a step of 1, binary a step of 2.
    if args.ternary:
        models.binarized_modules.step_size = 1
    else:
        models.binarized_modules.step_size = 2
        
    pgd = args.pgd
    savepgd = args.savepgd
    advonly = args.advonly
    pgditers = args.pgditers
    ################### end noise specific code ###########################################
    if args.evaluate:
        args.results_dir = '/home/scherupa/myBNN/results_eval/'
    if args.save == '':
        args.save = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    save_path = os.path.join(args.results_dir, args.save)
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    setup_logging(os.path.join(save_path, 'log.txt'))
    results_file = os.path.join(save_path, 'results.%s')
    results = ResultsLog(results_file % 'csv', results_file % 'html')

    logging.info("saving to %s", save_path)
    logging.debug("run arguments: %s", args)

    if 'cuda' in args.type:
        args.gpus = [int(i) for i in args.gpus.split(',')]
        torch.cuda.set_device(args.gpus[0])
        cudnn.benchmark = True
    else:
        args.gpus = None



    # create model
    logging.info("creating model %s", args.model)
    model = models.__dict__[args.model]
    model_config = {'input_size': args.input_size, 'dataset': args.dataset}
    
    # create model
    logging.info("creating base model %s", args.basemodel)
    model_base = models.__dict__[args.basemodel]
    model_base_config = {'input_size': args.input_size, 'dataset': args.dataset}

    if args.model_config != '':
        model_config = dict(model_config, **literal_eval(args.model_config))
        model_base_config = dict(model_base_config, **literal_eval(args.model_config))

    model = model(**model_config)
    logging.info("created model with configuration: %s", model_config)
    
    model_base = model_base(**model_base_config)
    cpt = torch.load(args.base)
    model_base.load_state_dict(cpt['state_dict'])
    logging.info("created base model with configuration: %s", model_base_config)

    # optionally resume from a checkpoint
    if args.evaluate:
        if not os.path.isfile(args.evaluate):
            parser.error('invalid checkpoint: {}'.format(args.evaluate))
        checkpoint = torch.load(args.evaluate)
        model.load_state_dict(checkpoint['state_dict'])
        logging.info("loaded checkpoint '%s' (epoch %s)",
                     args.evaluate, checkpoint['epoch'])
    elif args.resume:
        checkpoint_file = args.resume
        if os.path.isdir(checkpoint_file):
            results.load(os.path.join(checkpoint_file, 'results.csv'))
            checkpoint_file = os.path.join(
                checkpoint_file, 'model_best.pth.tar')
        if os.path.isfile(checkpoint_file):
            logging.info("loading checkpoint '%s'", args.resume)
            checkpoint = torch.load(checkpoint_file)
            args.start_epoch = checkpoint['epoch'] - 1
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            logging.info("loaded checkpoint '%s' (epoch %s)",
                         checkpoint_file, checkpoint['epoch'])
        else:
            logging.error("no checkpoint found at '%s'", args.resume)

    num_parameters = sum([l.nelement() for l in model.parameters()])
    logging.info("number of parameters: %d", num_parameters)

    # Data loading code
    default_transform = {
        'train': get_transform(args.dataset,
                               input_size=args.input_size, augment=True),
        'eval': get_transform(args.dataset,
                              input_size=args.input_size, augment=False)
    }
    transform = getattr(model, 'input_transform', default_transform)
    regime = getattr(model, 'regime', {0: {'optimizer': args.optimizer,
                                           'lr': args.lr,
                                           'momentum': args.momentum,
                                           'weight_decay': args.weight_decay}})
    # define loss function (criterion) and optimizer
    criterion = getattr(model, 'criterion', nn.CrossEntropyLoss)()
    criterion.type(args.type)
    model.type(args.type)
    model_base.type(args.type)
    

    val_data = get_dataset(args.dataset, 'val', transform['eval'])
    val_loader = torch.utils.data.DataLoader(
        val_data,
        batch_size=args.batch_size, shuffle=True,
        num_workers=args.workers, pin_memory=True)
    
    fname = "cifar10_pgd_resnet_binary"

    if savepgd:
        savepgd_attack = PGD(model_base, eps=0.031, alpha=2/255, iters=pgditers)
        # savepgd_attack.set_mode('int')
        savepgd_attack.save(data_loader=val_loader, file_name=fname,
                            accuracy=True, advonly=advonly)
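    # Note: this save(...) signature (file_name/accuracy/advonly) differs from
    # stock torchattacks' save(data_loader, save_path, ...); it presumably
    # targets an older or locally modified copy of the library.
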
    # adv_images, ori_images, ori_labels, ori_pred, adv_pred = torch.load(fname)
    adv_images, adv_pred = torch.load(fname)

    # adv_data = torch.utils.data.TensorDataset(adv_images.float() / adv_images.max(), ori_images.float(), ori_pred, adv_pred)
    adv_data = torch.utils.data.TensorDataset(adv_images, adv_pred)
    adv_loader = torch.utils.data.DataLoader(adv_data,
                                             batch_size=args.batch_size, shuffle=True,
                                             num_workers=args.workers, pin_memory=True)

    if args.evaluate:
        val_loss, val_prec1, val_prec5 = validate(adv_loader, model, criterion, 0, pgd=pgd, savepgd=savepgd)
        logging.info('\n'
                     'Validation Loss {val_loss:.4f} \t'
                     'Validation Prec@1 {val_prec1:.3f} \t'
                     'Validation Prec@5 {val_prec5:.3f} \n'
                     .format(val_loss=val_loss, val_prec1=val_prec1, 
                             val_prec5=val_prec5))
        return
    
   
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
    logging.info('training regime: %s', regime)


    for epoch in range(args.start_epoch, args.epochs):
        optimizer = adjust_optimizer(optimizer, epoch, regime)

        # train for one epoch
        train_loss, train_prec1, train_prec5 = train(
            adv_loader, model, criterion, epoch, optimizer, pgd=pgd)

        # evaluate on validation set
        val_loss, val_prec1, val_prec5 = validate(
            val_loader, model, criterion, epoch, pgd=pgd)

        # remember best prec@1 and save checkpoint
        is_best = val_prec1 > best_prec1
        best_prec1 = max(val_prec1, best_prec1)

        save_checkpoint({
            'epoch': epoch + 1,
            'model': args.model,
            'config': args.model_config,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'regime': regime
        }, is_best, path=save_path)
        logging.info('\n Epoch: {0}\t'
                     'Training Loss {train_loss:.4f} \t'
                     'Training Prec@1 {train_prec1:.3f} \t'
                     'Training Prec@5 {train_prec5:.3f} \t'
                     'Validation Loss {val_loss:.4f} \t'
                     'Validation Prec@1 {val_prec1:.3f} \t'
                     'Validation Prec@5 {val_prec5:.3f} \n'
                     .format(epoch + 1, train_loss=train_loss, val_loss=val_loss,
                             train_prec1=train_prec1, val_prec1=val_prec1,
                             train_prec5=train_prec5, val_prec5=val_prec5))

        results.add(epoch=epoch + 1, train_loss=train_loss, val_loss=val_loss,
                    train_error1=100 - train_prec1, val_error1=100 - val_prec1,
                    train_error5=100 - train_prec5, val_error5=100 - val_prec5)
        #results.plot(x='epoch', y=['train_loss', 'val_loss'],
        #             title='Loss', ylabel='loss')
        #results.plot(x='epoch', y=['train_error1', 'val_error1'],
        #             title='Error@1', ylabel='error %')
        #results.plot(x='epoch', y=['train_error5', 'val_error5'],
        #             title='Error@5', ylabel='error %')
        results.save()
Example #9

test_loader = torch.utils.data.DataLoader(test_data,  # `test_data` assumed; its definition was truncated in the source
                                          batch_size=1,
                                          shuffle=False)
#model = ResNet18()
#model = Holdout()
#model = Target()
#model.load_state_dict(torch.load("./model/target.pth"))
#model.load_state_dict(torch.load("./model/GOOGLENET.pth"))['net']
#model.load_state_dict(torch.load("./model/VGG16.pth"))['net']
#model.load_state_dict(torch.load("./model/target.pth"))

model = VGG('VGG16')
checkpoint = torch.load("./model/VGG16.pth")['net']
#model = GoogLeNet()
#model = ResNet18()
model = torch.nn.DataParallel(model)
model.load_state_dict(checkpoint)

model = model.eval().cuda()

pgd_attack = PGD(model, eps=10/255, alpha=2/255, steps=7)
pgd_attack.set_return_type('int')  # Save adversarial images as integer (uint8) tensors
#pgd_attack.save(data_loader=test_loader, save_path="./data/cifar10_Resnet18_pgd.pt", verbose=True)
pgd_attack.save(data_loader=test_loader, save_path="./data/cifar10_VGG16_pgd.pt", verbose=True)

# To evaluate on the saved adversarial set later, reload it and rescale back to [0, 1]:
#adv_images, adv_labels = torch.load("./data/cifar10_VGG16_pgd.pt")
#adv_data = TensorDataset(adv_images.float()/255, adv_labels)
#adv_loader = DataLoader(adv_data, batch_size=1, shuffle=False)