Example #1
def main():
    # Reproducibility
    np.random.seed(cfg['seed'])
    torch.manual_seed(cfg['seed'])

    # Model
    model = getModel(cfg['model'])

    # Load model and optimizer
    if cfg['load_ckpt'] != '':
        checkpoint = torch.load(cfg['load_ckpt'], map_location="cpu")
        model.load_state_dict(checkpoint['model_state'])
        print("load model on '{}' is complete.".format(cfg['load_ckpt']))
    cudnn.benchmark = True

    # Data Loader
    in_valid_loader = getDataLoader(ds_cfg=cfg['in_dataset'],
                                    dl_cfg=cfg['dataloader'],
                                    split="valid")

    if 'targets' in cfg['in_dataset'].keys():
        exp_dir = os.path.join(cfg['exp_root'], cfg['exp_dir'], "logits",
                               cfg['in_dataset']['dataset'],
                               cfg['in_dataset']['targets'][0])
    else:
        exp_dir = os.path.join(cfg['exp_root'], cfg['exp_dir'], "logits",
                               cfg['in_dataset']['dataset'])

    # Create the result directory
    os.makedirs(exp_dir, exist_ok=True)

    # Outlier detector
    detector_func = detectors.getDetector(cfg['detector'])
    global_cfg['detector'] = cfg['detector']

    # Print the key configuration
    print("=======================IMPORTANT CONFIG=======================")
    print(" Model    : {}\n Detector : {}\n".format(
        cfg['model']['network_kind'], cfg['detector']['detector']))
    print(
        "========Start logits extraction for GODIN. Result will be saved in {}"
        .format(exp_dir))

    valid_summary = valid_epoch_wo_outlier(model, in_valid_loader,
                                           detector_func)
    summary_log = "Acc [{}]\n".format(valid_summary['classifier_acc'])
    print(summary_log)

    torch.save(valid_summary['f_confidences'],
               os.path.join(exp_dir, 'f_confidences.pt'))
    torch.save(valid_summary['h_confidences'],
               os.path.join(exp_dir, 'h_confidences.pt'))
    torch.save(valid_summary['targets'], os.path.join(exp_dir, 'targets.pt'))
    torch.save(valid_summary['logits'], os.path.join(exp_dir, 'logits.pt'))
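
A note on reuse: the tensors written above can be reloaded for offline analysis. A minimal sketch, assuming the files produced by this example and a hypothetical exp_dir value:

import os
import torch

exp_dir = "./exp/logits/cifar10"  # hypothetical; mirror your cfg layout

# Reload the tensors written by the extraction run above.
f_conf = torch.load(os.path.join(exp_dir, "f_confidences.pt"))
h_conf = torch.load(os.path.join(exp_dir, "h_confidences.pt"))
logits = torch.load(os.path.join(exp_dir, "logits.pt"))
targets = torch.load(os.path.join(exp_dir, "targets.pt"))

# Sanity check: classifier accuracy implied by the stored logits.
acc = (logits.argmax(dim=1) == targets).float().mean().item()
print(f_conf.shape, h_conf.shape, "acc:", acc)
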
Example #2
def main():
    # Reproducibility
    np.random.seed(cfg['seed'])
    torch.manual_seed(cfg['seed'])
    
    # Model
    model = getModel(cfg['model'])
    
    # Load model and optimizer
    if cfg['load_ckpt'] != '':
        checkpoint = torch.load(cfg['load_ckpt'], map_location="cpu")
        model.load_state_dict(checkpoint['model_state'])
        print("load model on '{}' is complete.".format(cfg['load_ckpt']))
    cudnn.benchmark = True
    
    # Data Loader
    in_valid_loader = getDataLoader(ds_cfg=cfg['in_dataset'],
                                    dl_cfg=cfg['dataloader'],
                                    split="valid")
    

    exp_dir = os.path.join(cfg['exp_root'], cfg['exp_dir'])
    
    # Create the result directory
    os.makedirs(exp_dir, exist_ok=True)
    
    # Loss function
    loss_func = losses.getLoss(cfg['loss'])
    
    # Print the key configuration
    print("=======================IMPORTANT CONFIG=======================")
    print(" Model    : {}\n Loss     : {}\n".format(
        cfg['model']['network_kind'], cfg['loss']['loss']))
    print("========Start epsilon regression for GODIN. Result will be saved in {}".format(exp_dir))
    
    logfile = open(os.path.join(exp_dir, "epsilon.txt"), "w")
    
    max_epsilon = 1.0
    step = 0.1
    # Step in integers to avoid floating-point drift in the loop bound
    # (accumulating 0.1 never lands exactly on 1.0).
    for i in range(int(round(max_epsilon / step)) + 1):
        epsilon = round(i * step, 10)
        valid_summary = valid_epoch_wo_outlier(model, in_valid_loader, epsilon)
        summary_log = "Epsilon [{}] => Score [{}]\n".format(
            epsilon, valid_summary['score'])
        print(summary_log)
        logfile.write(summary_log)
            
    logfile.close()
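
Once the sweep finishes, the best perturbation magnitude can be read back from epsilon.txt. A minimal sketch, assuming the log format written above and that a higher score is better:

import re

best_eps, best_score = None, float("-inf")
with open("epsilon.txt") as fp:  # the file written by the loop above
    for line in fp:
        m = re.match(r"Epsilon \[(.+?)\] => Score \[(.+?)\]", line)
        if m and float(m.group(2)) > best_score:
            best_eps, best_score = float(m.group(1)), float(m.group(2))
print("best epsilon:", best_eps, "score:", best_score)
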
Example #3
def main():
    global global_cfg
    # Reproducibility
    np.random.seed(cfg['seed'])
    torch.manual_seed(cfg['seed'])
    
    # Model & Optimizer
    model = getModel(cfg['model'])
    # 4-way rotation-prediction head (auxiliary self-supervised task)
    model.rot_head = nn.Linear(model.nChannels, 4)
    model.rot_head.cuda()
    optimizer = optim.getOptimizer(model, cfg['optim'])
    start_epoch = 1
    
    # Load model and optimizer
    if cfg['load_ckpt'] != '':
        checkpoint = torch.load(cfg['load_ckpt'], map_location="cpu")
        model.load_state_dict(checkpoint['model_state'])
        print("load model on '{}' is complete.".format(cfg['load_ckpt']))
        if not cfg['finetuning']:
            optimizer.load_state_dict(checkpoint['optimizer_state'])
        if 'epoch' in checkpoint.keys() and not cfg['finetuning']:
            start_epoch = checkpoint['epoch']
            print("Restore epoch {}".format(start_epoch))
        else:
            start_epoch = 1
    cudnn.benchmark = True
    
    # Data Loader
    in_train_loader = getDataLoader(ds_cfg=cfg['in_dataset'],
                                    dl_cfg=cfg['dataloader'],
                                    split="train")
    in_valid_loader = getDataLoader(ds_cfg=cfg['in_dataset'],
                                    dl_cfg=cfg['dataloader'],
                                    split="valid")
    attack_in = None
    if 'PGD' in cfg.keys() and cfg['PGD'] is not None:
        attack_in = RotPGDAttack(model=model,
                                 eps=cfg['PGD']['epsilon'],
                                 nb_iter=cfg['PGD']['iters'],
                                 eps_iter=cfg['PGD']['iter_size'],
                                 rand_init=True,
                                 loss_func='CE')
    
    if cfg['out_dataset'] is not None:
        out_train_loader = getDataLoader(ds_cfg=cfg['out_dataset'],
                                         dl_cfg=cfg['dataloader'],
                                         split="train")
        out_valid_loader = getDataLoader(ds_cfg=cfg['out_dataset'],
                                         dl_cfg=cfg['dataloader'],
                                         split="valid")
    else:
        out_train_loader = None
        out_valid_loader = None
    
    # Create the result directory and tensorboard event files
    exp_dir = os.path.join(cfg['exp_root'], cfg['exp_dir'])
    os.makedirs(exp_dir, exist_ok=True)
    shutil.copy('./config.py', os.path.join(exp_dir, "config.py"))
    writer_train = SummaryWriter(logdir=os.path.join(exp_dir, 'log', 'train'))
    writer_valid = SummaryWriter(logdir=os.path.join(exp_dir, 'log', 'valid'))
    
    # Stats Meters
    #train_meter = TrainMeter()
    #valid_meter = ValidMeter()
    
    # Loss function (loss_func is required by the training calls below)
    loss_func = losses.getLoss(cfg['loss'])
    global_cfg['loss'] = cfg['loss']
    
    # Outlier detector
    detector_func = detectors.getDetector(cfg['detector'])
    global_cfg['detector'] = cfg['detector']
    
    print("=======================IMPORTANT CONFIG=======================")
    print(" Model    : {}\n \
Loss     : {}\n \
Detector : {}\n \
Optimizer: {}\n".format(cfg['model']['network_kind'], cfg['loss']['loss'], cfg['detector']['detector'], cfg['optim']['optimizer']))
    print("============Start training. Result will be saved in {}".format(exp_dir))
    
    for cur_epoch in range(start_epoch, cfg['max_epoch'] + 1):
        if out_train_loader is not None:
            train_summary = train_epoch_w_outlier(model, optimizer,
                                                  in_train_loader,
                                                  out_train_loader, loss_func,
                                                  detector_func, cur_epoch,
                                                  cfg['optim'], writer_train)
        else:
            train_summary = train_epoch_wo_outlier(model, optimizer,
                                                   in_train_loader, attack_in,
                                                   cur_epoch, cfg['optim'],
                                                   writer_train)
        summary_write(summary=train_summary, writer=writer_train)
        print("Training result=========Epoch [{}]/[{}]=========\n"
              "lr: {} | loss: {} | acc: {}".format(
                  cur_epoch, cfg['max_epoch'], train_summary['lr'],
                  train_summary['avg_loss'], train_summary['classifier_acc']))
        
        
        if cur_epoch % cfg['valid_epoch'] == 0:
            if out_valid_loader is not None:
                valid_summary = valid_epoch_w_outlier(model, in_valid_loader,
                                                      out_valid_loader,
                                                      loss_func, detector_func,
                                                      cur_epoch)
            else:
                valid_summary = valid_epoch_wo_outlier(model, in_valid_loader,
                                                       cur_epoch)
            summary_write(summary=valid_summary, writer=writer_valid)
            print("Validate result=========Epoch [{}]/[{}]=========\n"
                  "loss: {} | acc: {}".format(cur_epoch, cfg['max_epoch'],
                                              valid_summary['avg_loss'],
                                              valid_summary['classifier_acc']))
        
        if cur_epoch % cfg['ckpt_epoch'] == 0:
            ckpt_dir = os.path.join(cfg['exp_root'], cfg['exp_dir'], "ckpt")
            os.makedirs(ckpt_dir, exist_ok=True)
            model_state = (model.module.state_dict()
                           if cfg['ngpu'] > 1 else model.state_dict())
            checkpoint = {
                "epoch": cur_epoch,
                "model_state": model_state,
                "optimizer_state": optimizer.state_dict(),
            }
            ckpt_name = "checkpoint_epoch_{}".format(cur_epoch)
            ckpt_path = os.path.join(ckpt_dir, ckpt_name + ".pyth")
            torch.save(checkpoint, ckpt_path)
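
The checkpoint dict saved above can later be restored to resume training. A minimal sketch mirroring the same layout (epoch / model_state / optimizer_state); the helper name is hypothetical:

import torch

def resume_checkpoint(model, optimizer, ckpt_path):
    """Restore a checkpoint written in the format used above."""
    checkpoint = torch.load(ckpt_path, map_location="cpu")
    model.load_state_dict(checkpoint['model_state'])
    optimizer.load_state_dict(checkpoint['optimizer_state'])
    # The script above restores checkpoint['epoch'] as the start epoch.
    return checkpoint.get('epoch', 1)
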
Example #4
def main():
    # Reproducibility
    np.random.seed(cfg['seed'])
    torch.manual_seed(cfg['seed'])

    # Model (single validation pass, so one "epoch")
    model = getModel(cfg['model'])
    start_epoch = 1
    max_epoch = 1

    assert len(cfg['load_ckpt']) == len(cfg['in_dataset']['targets']) + 1
    # Load model and optimizer
    for idx, ckpt in enumerate(cfg['load_ckpt']):
        checkpoint = torch.load(ckpt, map_location="cpu")
        if idx == 0:
            model.ava_network.load_state_dict(checkpoint['model_state'])
        else:
            model.ova_networks[idx - 1].load_state_dict(
                checkpoint['model_state'])
        print("load model on '{}' is complete.".format(ckpt))
    cudnn.benchmark = True

    # Data Loader
    in_valid_loader = getDataLoader(ds_cfg=cfg['in_dataset'],
                                    dl_cfg=cfg['dataloader'],
                                    split="valid")

    if cfg['out_dataset'] is not None:
        out_valid_loader = getDataLoader(ds_cfg=cfg['out_dataset'],
                                         dl_cfg=cfg['dataloader'],
                                         split="valid")
        exp_dir = os.path.join(cfg['exp_root'], cfg['exp_dir'], "valid",
                               cfg['out_dataset']['dataset'])
    else:
        out_valid_loader = None
        exp_dir = os.path.join(cfg['exp_root'], cfg['exp_dir'], "valid",
                               "classifier")

    # Create the result directory
    os.makedirs(exp_dir, exist_ok=True)
    shutil.copy('./config.py', os.path.join(exp_dir, "val_config.py"))

    # Loss function
    loss_func = losses.getLoss(cfg['loss'])
    global_cfg['loss'] = cfg['loss']

    # Outlier detector
    detector_func = detectors.getDetector(cfg['detector'])
    global_cfg['detector'] = cfg['detector']
    print("=======================IMPORTANT CONFIG=======================")
    print(" Model    : {}\n \
Loss     : {}\n \
Detector : {}\n".format(cfg['model']['network_kind'], cfg['loss']['loss'],
                        cfg['detector']['detector']))
    print(
        "========Start validation. Result will be saved in {}".format(exp_dir))

    logfile = open(os.path.join(exp_dir, "validation_log.txt"), "w")
    logfile2 = open(os.path.join(exp_dir, "wrong_predict_log.txt"), "w")
    for cur_epoch in range(start_epoch, max_epoch + 1):
        if out_valid_loader is not None:
            valid_summary = valid_epoch_w_outlier(model, in_valid_loader,
                                                  out_valid_loader, loss_func,
                                                  detector_func, cur_epoch,
                                                  logfile2)
            summary_log = "=============Epoch [{}]/[{}]=============\nloss: {} | acc: {} | acc_w_ood: {}\nAUROC: {} | AUPR: {} | FPR95: {}\nInlier Conf. {} | Outlier Conf. {}\n".format(
                cur_epoch, max_epoch, valid_summary['avg_loss'],
                valid_summary['classifier_acc'], valid_summary['acc'],
                valid_summary['AUROC'], valid_summary['AUPR'],
                valid_summary['FPR95'], valid_summary['inlier_confidence'],
                valid_summary['outlier_confidence'])

            ind_max, ind_min = np.max(valid_summary['inliers']), np.min(
                valid_summary['inliers'])
            ood_max, ood_min = np.max(valid_summary['outliers']), np.min(
                valid_summary['outliers'])

            ranges = (min(ind_min, ood_min), max(ind_max, ood_max))

            fig = plt.figure()
            # sns.distplot is deprecated in recent seaborn (histplot is the
            # modern equivalent); kept here to match the original code.
            sns.distplot(valid_summary['inliers'].ravel(),
                         hist_kws={'range': ranges},
                         kde=False,
                         bins=50,
                         norm_hist=True,
                         label='In-distribution')
            sns.distplot(valid_summary['outliers'],
                         hist_kws={'range': ranges},
                         kde=False,
                         bins=50,
                         norm_hist=True,
                         label='Out-of-distribution')
            plt.xlabel('Confidence')
            plt.ylabel('Density')
            fig.legend()
            fig.savefig(os.path.join(exp_dir, "confidences.png"))

        else:
            valid_summary = valid_epoch_wo_outlier(model, in_valid_loader,
                                                   loss_func, cur_epoch,
                                                   logfile2)
            summary_log = "=============Epoch [{}]/[{}]=============\nloss: {} | acc: {}\n".format(
                cur_epoch, max_epoch, valid_summary['avg_loss'],
                valid_summary['classifier_acc'])

        print(summary_log)
        logfile.write(summary_log)

    logfile.close()
    logfile2.close()
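
The AUROC / AUPR / FPR95 values reported above can also be recomputed directly from the raw score arrays. A minimal sketch using scikit-learn, assuming inliers and outliers are 1-D confidence arrays where in-distribution samples score higher:

import numpy as np
from sklearn.metrics import average_precision_score, roc_auc_score

def ood_metrics(inliers, outliers):
    """AUROC, AUPR and FPR at 95% TPR, treating inliers as the positive class."""
    scores = np.concatenate([inliers, outliers])
    labels = np.concatenate([np.ones(len(inliers)), np.zeros(len(outliers))])
    auroc = roc_auc_score(labels, scores)
    aupr = average_precision_score(labels, scores)
    # FPR95: fraction of outliers above the threshold that keeps 95% of inliers.
    threshold = np.percentile(inliers, 5)
    fpr95 = float(np.mean(outliers >= threshold))
    return auroc, aupr, fpr95
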
Example #5
def main(cfg):
    # Set the paths to the pre-trained model and the output directory
    pre_trained_net = cfg['load_ckpt']
    outf = os.path.join(cfg['exp_root'], cfg['exp_dir'], 'valid', 'MD_ADV')
    os.makedirs(outf, exist_ok=True)

    torch.cuda.manual_seed(cfg['seed'])
    device = torch.device('cuda:0')
    
    num_classes = cfg['model']['num_classes']
    # Pick the adversarial step size for the chosen attack and dataset
    if cfg['detector']['adv_method'] == 'FGSM':
        adv_noise = 0.05
    elif cfg['detector']['adv_method'] == 'BIM':
        adv_noise = 0.01
    elif cfg['detector']['adv_method'] == 'DeepFool':
        if cfg['model']['net_type'] == 'resnet':
            if cfg['in_dataset']['dataset'] == 'cifar10':
                adv_noise = 0.18
            elif cfg['in_dataset']['dataset'] == 'cifar100':
                adv_noise = 0.03
            else:
                adv_noise = 0.1
        else:
            if cfg['in_dataset']['dataset'] == 'cifar10':
                adv_noise = 0.6
            elif cfg['in_dataset']['dataset'] == 'cifar100':
                adv_noise = 0.1
            else:
                adv_noise = 0.5

    # Load the network
    model = getModel(cfg['model'])
    net_type = cfg['model']['net_type']
    checkpoint = torch.load(cfg['load_ckpt'], map_location="cpu")
    model.load_state_dict(checkpoint['model_state'])

    in_transform = cfg['in_dataset']['valid_transform']
        
    # Extremes of the normalized input range, used for clamping
    min_pixel = -2.42906570435
    max_pixel = 2.75373125076
    if cfg['in_dataset']['dataset'] == 'cifar10':
        if cfg['detector']['adv_method'] == 'FGSM':
            random_noise_size = 0.25 / 4
        elif cfg['detector']['adv_method'] == 'BIM':
            random_noise_size = 0.13 / 2
        elif cfg['detector']['adv_method'] == 'DeepFool':
            random_noise_size = 0.25 / 4
        elif cfg['detector']['adv_method'] == 'CWL2':
            random_noise_size = 0.05 / 2
    elif cfg['in_dataset']['dataset'] == 'cifar100':
        if cfg['detector']['adv_method'] == 'FGSM':
            random_noise_size = 0.25 / 8
        elif cfg['detector']['adv_method'] == 'BIM':
            random_noise_size = 0.13 / 4
        elif cfg['detector']['adv_method'] == 'DeepFool':
            random_noise_size = 0.13 / 4
        elif cfg['detector']['adv_method'] == 'CWL2':
            random_noise_size = 0.05 / 2
    else:
        random_noise_size = 1
            
    model.cuda()
    print("load model on '{}' is completed.".format(cfg['load_ckpt']))
    
    # Load the dataset
    print('load target data:', cfg['in_dataset']['dataset'])
    test_loader = getDataLoader(ds_cfg=cfg['in_dataset'],
                                dl_cfg=cfg['dataloader'],
                                split='valid')

    
    print('Attack: ' + cfg['detector']['adv_method'] + ', Dist: ' +
          cfg['in_dataset']['dataset'] + '\n')
    model.eval()
    adv_data_tot, clean_data_tot, noisy_data_tot = 0, 0, 0
    label_tot = 0
    
    correct, adv_correct, noise_correct = 0, 0, 0
    total, generated_noise = 0, 0

    criterion = nn.CrossEntropyLoss().cuda()

    # Per-channel std of the input normalization; FGSM/BIM gradients are
    # divided by it so the perturbation has the intended size per channel.
    if net_type == 'densenet':
        channel_std = (63.0 / 255.0, 62.1 / 255.0, 66.7 / 255.0)
    else:
        channel_std = (0.2023, 0.1994, 0.2010)

    def scale_gradient(gradient):
        for c, std in enumerate(channel_std):
            idx = torch.LongTensor([c]).cuda()
            gradient.index_copy_(1, idx, gradient.index_select(1, idx) / std)
        return gradient

    selected_list = []
    selected_index = 0
    
    for data, target in test_loader:
        data, target = data.cuda(), target.cuda()
        # Variable is a legacy no-op in modern PyTorch; kept for compatibility
        with torch.no_grad():
            data, target = Variable(data), Variable(target)
        output = model(data)

        # compute the accuracy
        pred = output.data.max(1)[1]
        equal_flag = pred.eq(target.data).cpu()
        correct += equal_flag.sum()

        noisy_data = torch.add(data.data, torch.randn(data.size()).cuda(),
                               alpha=random_noise_size)
        noisy_data = torch.clamp(noisy_data, min_pixel, max_pixel)

        if total == 0:
            clean_data_tot = data.clone().data.cpu()
            label_tot = target.clone().data.cpu()
            noisy_data_tot = noisy_data.clone().cpu()
        else:
            clean_data_tot = torch.cat(
                (clean_data_tot, data.clone().data.cpu()), 0)
            label_tot = torch.cat((label_tot, target.clone().data.cpu()), 0)
            noisy_data_tot = torch.cat(
                (noisy_data_tot, noisy_data.clone().cpu()), 0)
            
        # generate adversarial
        model.zero_grad()
        inputs = Variable(data.data, requires_grad=True)
        output = model(inputs)
        loss = criterion(output, target)
        loss.backward()

        if cfg['detector']['adv_method'] == 'FGSM':
            gradient = torch.ge(inputs.grad.data, 0)
            gradient = (gradient.float() - 0.5) * 2
            gradient = scale_gradient(gradient)

        elif cfg['detector']['adv_method'] == 'BIM':
            gradient = torch.sign(inputs.grad.data)
            for k in range(5):
                inputs = torch.add(inputs.data, gradient, alpha=adv_noise)
                inputs = torch.clamp(inputs, min_pixel, max_pixel)
                inputs = Variable(inputs, requires_grad=True)
                output = model(inputs)
                loss = criterion(output, target)
                loss.backward()
                gradient = scale_gradient(torch.sign(inputs.grad.data))

        if cfg['detector']['adv_method'] == 'DeepFool':
            _, adv_data = adversary.deepfool(model, data.data.clone(),
                                             target.data.cpu(), num_classes,
                                             step_size=adv_noise,
                                             train_mode=False)
            adv_data = adv_data.cuda()
        elif cfg['detector']['adv_method'] == 'CWL2':
            _, adv_data = adversary.cw(model, data.data.clone(), target.data.cpu(), 1.0, 'l2', crop_frac=1.0)
        else:
            adv_data = torch.add(inputs.data, gradient, alpha = adv_noise)
            
        adv_data = torch.clamp(adv_data, min_pixel, max_pixel)
        
        # measure the noise 
        temp_noise_max = torch.abs((data.data - adv_data).view(adv_data.size(0), -1))
        temp_noise_max, _ = torch.max(temp_noise_max, dim=1)
        generated_noise += torch.sum(temp_noise_max)


        if total == 0:
            adv_data_tot = adv_data.clone().cpu()
        else:
            adv_data_tot = torch.cat((adv_data_tot, adv_data.clone().cpu()), 0)

        with torch.no_grad():
            output = model(Variable(adv_data))
        # compute the accuracy
        pred = output.data.max(1)[1]
        equal_flag_adv = pred.eq(target.data).cpu()
        adv_correct += equal_flag_adv.sum()
        
        with torch.no_grad():
            output = model(Variable(noisy_data))
        # compute the accuracy
        pred = output.data.max(1)[1]
        equal_flag_noise = pred.eq(target.data).cpu()
        noise_correct += equal_flag_noise.sum()
        
        for i in range(data.size(0)):
            if equal_flag[i] == 1 and equal_flag_noise[i] == 1 and equal_flag_adv[i] == 0:
                selected_list.append(selected_index)
            selected_index += 1
            
        total += data.size(0)

    selected_list = torch.LongTensor(selected_list)
    clean_data_tot = torch.index_select(clean_data_tot, 0, selected_list)
    adv_data_tot = torch.index_select(adv_data_tot, 0, selected_list)
    noisy_data_tot = torch.index_select(noisy_data_tot, 0, selected_list)
    label_tot = torch.index_select(label_tot, 0, selected_list)

    torch.save(clean_data_tot, '%s/clean_data_%s_%s_%s.pth' %
               (outf, net_type, cfg['in_dataset']['dataset'],
                cfg['detector']['adv_method']))
    torch.save(adv_data_tot, '%s/adv_data_%s_%s_%s.pth' %
               (outf, net_type, cfg['in_dataset']['dataset'],
                cfg['detector']['adv_method']))
    torch.save(noisy_data_tot, '%s/noisy_data_%s_%s_%s.pth' %
               (outf, net_type, cfg['in_dataset']['dataset'],
                cfg['detector']['adv_method']))
    torch.save(label_tot, '%s/label_%s_%s_%s.pth' %
               (outf, net_type, cfg['in_dataset']['dataset'],
                cfg['detector']['adv_method']))

    print('Adversarial Noise: ({:.2f})\n'.format(generated_noise.item() / total))
    print('Final Accuracy: {}/{} ({:.2f}%)\n'.format(
        correct.item(), total, 100. * correct.item() / total))
    print('Adversarial Accuracy: {}/{} ({:.2f}%)\n'.format(
        adv_correct.item(), total, 100. * adv_correct.item() / total))
    print('Noisy Accuracy: {}/{} ({:.2f}%)\n'.format(
        noise_correct.item(), total, 100. * noise_correct.item() / total))
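
The saved tensors are the inputs to the Mahalanobis-based detection stage. A minimal loading sketch, assuming the naming scheme used above with hypothetical resnet / cifar10 / FGSM values:

import torch

outf = './valid/MD_ADV'          # hypothetical output directory
tag = 'resnet_cifar10_FGSM'      # net_type, dataset, adv_method

clean = torch.load('%s/clean_data_%s.pth' % (outf, tag))
adv = torch.load('%s/adv_data_%s.pth' % (outf, tag))
noisy = torch.load('%s/noisy_data_%s.pth' % (outf, tag))
labels = torch.load('%s/label_%s.pth' % (outf, tag))

# The four tensors are index-aligned: only samples classified correctly on
# clean and noisy inputs but fooled by the attack were kept.
assert clean.size(0) == adv.size(0) == noisy.size(0) == labels.size(0)
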
Example #6
writer_valid = SummaryWriter(logdir=os.path.join(outf, 'log', 'valid'))
writer_test = SummaryWriter(logdir=os.path.join(outf, 'log', 'test'))
writer_train = SummaryWriter(logdir=os.path.join(outf, 'log', 'train'))

# check the in-distribution dataset
num_classes = cfg['model']['num_classes']

best_auroc, best_result, best_index = 0, 0, 0
temp_file_name_1 = '%s/ADV_and_RANDOM_NOISE_SIZE_TRAIN.txt' % (outf)
f = open(temp_file_name_1, 'w')
temp_file_name_2 = '%s/ADV_and_RANDOM_NOISE_SIZE_VALID.txt' % (outf)
g = open(temp_file_name_2, 'w')
temp_file_name_3 = '%s/ADV_and_RANDOM_NOISE_SIZE_TEST.txt' % (outf)
h = open(temp_file_name_3, 'w')

model = getModel(cfg['model'])
checkpoint = torch.load(cfg['load_ckpt'], map_location="cpu")
model.load_state_dict(checkpoint['model_state'])

min_pixel = cfg['min']
max_pixel = cfg['max']

model.cuda()
print("load model on '{}' is completed.".format(cfg['load_ckpt']))

# load dataset
test_loader = getDataLoader(ds_cfg=cfg['in_dataset'],
                            dl_cfg=cfg['dataloader'],
                            split='valid')

fooled_log = open(os.path.join(outf, 'fooled.txt'), 'w')
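
The best_auroc / best_result / best_index variables suggest that a sweep over noise magnitudes follows in the truncated part of this snippet. A hypothetical skeleton of such a loop (validate_with_noise and the candidate magnitudes are assumptions, not part of this snippet):

candidate_sizes = [0.0, 0.0014, 0.0028, 0.005, 0.01]  # assumed magnitudes

for index, noise_size in enumerate(candidate_sizes):
    # validate_with_noise is a hypothetical helper returning a summary dict
    # with an AUROC for the given perturbation magnitude.
    result = validate_with_noise(model, test_loader, noise_size)
    g.write('noise {} => AUROC {}\n'.format(noise_size, result['AUROC']))
    if result['AUROC'] > best_auroc:
        best_auroc, best_result, best_index = result['AUROC'], result, index
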
Example #7
def main():
    global global_cfg
    # Reproducibility
    np.random.seed(cfg['seed'])
    torch.manual_seed(cfg['seed'])
    start_epoch = 1

    # feature_extractor
    feature_extractor = getModel(cfg['model']['feature_extractor'])
    f_checkpoint = torch.load(cfg['f_load_ckpt'], map_location="cpu")
    feature_extractor.load_state_dict(f_checkpoint['model_state'])
    print("load feature_extractor on '{}' is complete.".format(
        cfg['f_load_ckpt']))

    if cfg['in_dataset']['img_size'] == 32:
        G = Generator32(cfg['in_dataset']['batch_size'],
                        cfg['in_dataset']['img_size'], cfg['model']['z_dim'],
                        cfg['model']['g_conv_dim']).cuda()
        D = Discriminator32(cfg['in_dataset']['batch_size'],
                            cfg['in_dataset']['img_size'],
                            cfg['model']['d_conv_dim']).cuda()
    else:
        G = Generator(cfg['in_dataset']['batch_size'],
                      cfg['in_dataset']['img_size'], cfg['model']['z_dim'],
                      cfg['model']['g_conv_dim']).cuda()
        D = Discriminator(cfg['in_dataset']['batch_size'],
                          cfg['in_dataset']['img_size'],
                          cfg['model']['d_conv_dim']).cuda()

    G_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, G.parameters()),
        cfg['optim']['g_lr'], [cfg['optim']['beta1'], cfg['optim']['beta2']])
    D_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, D.parameters()),
        cfg['optim']['d_lr'], [cfg['optim']['beta1'], cfg['optim']['beta2']])

    if cfg['g_load_ckpt'] != '':
        g_checkpoint = torch.load(cfg['g_load_ckpt'], map_location="cpu")
        G.load_state_dict(g_checkpoint['model_state'])
        print("Loading G from '{}' is complete.".format(cfg['g_load_ckpt']))
    if cfg['d_load_ckpt'] != '':
        d_checkpoint = torch.load(cfg['d_load_ckpt'], map_location="cpu")
        D.load_state_dict(d_checkpoint['model_state'])
        print("Loading D from '{}' is complete.".format(cfg['d_load_ckpt']))

    cudnn.benchmark = True

    # Data Loader
    in_train_loader = getDataLoader(ds_cfg=cfg['in_dataset'],
                                    dl_cfg=cfg['dataloader'],
                                    split="train")
    in_valid_loader = getDataLoader(ds_cfg=cfg['in_dataset'],
                                    dl_cfg=cfg['dataloader'],
                                    split="valid")

    if cfg['out_dataset'] is not None:
        out_train_loader = getDataLoader(ds_cfg=cfg['out_dataset'],
                                         dl_cfg=cfg['dataloader'],
                                         split="train")
        out_valid_loader = getDataLoader(ds_cfg=cfg['out_dataset'],
                                         dl_cfg=cfg['dataloader'],
                                         split="valid")
    else:
        out_train_loader = None
        out_valid_loader = None

    # Create the result directory and tensorboard event files
    exp_dir = os.path.join(cfg['exp_root'], cfg['exp_dir'])
    os.makedirs(exp_dir, exist_ok=True)
    shutil.copy('./config.py', os.path.join(exp_dir, "config.py"))
    writer_train = SummaryWriter(logdir=os.path.join(exp_dir, 'log', 'train'))
    writer_valid = SummaryWriter(logdir=os.path.join(exp_dir, 'log', 'valid'))

    global_cfg['loss'] = cfg['loss']

    #     print("=======================IMPORTANT CONFIG=======================")
    #     print(" Model    : {}\n \
    # Loss     : {}\n \
    # Detector : {}\n \
    # Optimizer: {}\n".format(cfg['model']['network_kind'], cfg['loss']['loss'], cfg['detector']['detector'], cfg['optim']['optimizer']))
    print("============Start training. Result will be saved in {}".format(
        exp_dir))
    fixed_z = tensor2var(torch.randn(1, G.z_dim))
    for cur_epoch in range(start_epoch, cfg['max_epoch'] + 1):
        if out_train_loader is not None:
            # This branch is not wired up in this script: model, optimizer,
            # loss_func and detector_func are never defined here.
            raise NotImplementedError(
                "Training with an outlier loader is not supported in this script.")
        else:
            train_summary = train_epoch_wo_outlier(feature_extractor, G, D,
                                                   G_optimizer, D_optimizer,
                                                   in_train_loader, cur_epoch,
                                                   cfg['optim'], writer_train)
        summary_write(summary=train_summary, writer=writer_train)
        print(
            "Training result=========Epoch [{}]/[{}]=========\nlr: {} | loss: {}"
            .format(cur_epoch, cfg['max_epoch'], train_summary['lr'],
                    train_summary['avg_loss']))

        # Sample an image from the fixed latent vector
        G.eval()
        fake_images, _, _ = G(fixed_z, train_summary['real_feature'])
        save_image(((fake_images.data + 1) / 2).clamp_(0, 1),
                   os.path.join(
                       cfg['exp_root'], cfg['exp_dir'],
                       '{}_fake_{}.png'.format(cur_epoch,
                                               train_summary['real_target'])))
        G.train()  # restore training mode for the next epoch

        #         if cur_epoch % cfg['valid_epoch'] == 0:
        #             if out_valid_loader is not None:
        #                 valid_summary = valid_epoch_w_outlier(model, in_valid_loader, out_valid_loader, loss_func, detector_func, cur_epoch)
        #             else:
        #                 valid_summary = valid_epoch_wo_outlier(model, in_valid_loader, loss_func, cur_epoch)
        #             summary_write(summary=valid_summary, writer=writer_valid)
        #             print("Validate result=========Epoch [{}]/[{}]=========\nloss: {} | acc: {}".format(cur_epoch, cfg['max_epoch'], valid_summary['avg_loss'], valid_summary['classifier_acc']))

        if cur_epoch % cfg['ckpt_epoch'] == 0:
            ckpt_dir = os.path.join(cfg['exp_root'], cfg['exp_dir'], "ckpt")
            os.makedirs(ckpt_dir, exist_ok=True)

            G_state = (G.module.state_dict()
                       if cfg['ngpu'] > 1 else G.state_dict())
            G_checkpoint = {
                "epoch": cur_epoch,
                "model_state": G_state,
                "optimizer_state": G_optimizer.state_dict(),
            }
            ckpt_name = "G_checkpoint_epoch_{}".format(cur_epoch)
            ckpt_path = os.path.join(ckpt_dir, ckpt_name + ".pyth")
            torch.save(G_checkpoint, ckpt_path)

            D_state = (D.module.state_dict()
                       if cfg['ngpu'] > 1 else D.state_dict())
            D_checkpoint = {
                "epoch": cur_epoch,
                "model_state": D_state,
                "optimizer_state": D_optimizer.state_dict(),
            }
            ckpt_name = "D_checkpoint_epoch_{}".format(cur_epoch)
            ckpt_path = os.path.join(ckpt_dir, ckpt_name + ".pyth")
            torch.save(D_checkpoint, ckpt_path)
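
The generator checkpoints saved above can be reloaded for standalone sampling. A minimal sketch, assuming a G built as in the script, a conditioning feature from the feature extractor, and a hypothetical checkpoint path:

import torch
from torchvision.utils import save_image

def sample_from_checkpoint(G, real_feature, ckpt_path, out_path="sample.png"):
    """Reload a G checkpoint in the format saved above and write one image."""
    g_checkpoint = torch.load(ckpt_path, map_location="cpu")
    G.load_state_dict(g_checkpoint['model_state'])
    G.eval()
    with torch.no_grad():
        z = torch.randn(1, G.z_dim).cuda()
        fake_images, _, _ = G(z, real_feature)
    save_image(((fake_images + 1) / 2).clamp_(0, 1), out_path)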