Code example #1
0
File: eval.py  Project: WrongWhp/kWTA-Activation
def eval(models_config, attacks_config, log_file, cuda_id):
    """Evaluate every configured model against every configured attack.

    For each model: measure clean accuracy, then run each attack (either a
    foolbox attack or a project-local one) and record adversarial accuracy.
    All results are appended to ``log_file`` and echoed to stdout.

    NOTE(review): the function name shadows the builtin ``eval``; it is kept
    unchanged for backward compatibility with existing callers.

    Args:
        models_config: config consumed by ``load_model``; yields a list of
            (dataset_name, model_file, model) triples.
        attacks_config: config consumed by ``load_attacks``; yields dicts
            with keys 'name', 'args', 'foolbox', 'method'.
        log_file: path to the log file, opened in append mode.
        cuda_id: index of the CUDA device to evaluate on.
    """

    def log(msg):
        # Append one line to the log file and mirror it on stdout.
        # Opening per message keeps the log flushed even if a later
        # attack crashes mid-run.
        with open(log_file, 'a+') as logf:
            logf.write(msg + '\n')
        print(msg)

    model_list = load_model(models_config)
    attack_list = load_attacks(attacks_config)

    device = torch.device("cuda:{}".format(cuda_id))

    for model_ in model_list:
        datasetname, modelfile, model = model_[0], model_[1], model_[2]
        dataset = load_dataset(datasetname)
        log('model: {}'.format(modelfile))
        model.to(device)
        model.eval()

        # Clean (non-adversarial) accuracy as the baseline.
        loader = DataLoader(dataset, batch_size=16, shuffle=True)
        err, _ = training.epoch(loader, model, device=device, use_tqdm=True)
        log('standard acc: {}'.format(1 - err))

        for attack_ in attack_list:
            log(attack_['name'])
            log(json.dumps(attack_['args']))

            if attack_['foolbox']:
                # Foolbox attacks operate sample-by-sample, hence batch_size=1.
                loader = DataLoader(dataset, batch_size=1, shuffle=True)
                fmodel = foolbox.models.PyTorchModel(model,
                                                     bounds=(0, 1),
                                                     num_classes=10,
                                                     preprocessing=(0, 1),
                                                     device=device)
                attack = attack_['method'](
                    fmodel, distance=foolbox.distances.Linfinity)
                err, _ = attack_foolbox.epoch_foolbox(loader,
                                                      attack,
                                                      use_tqdm=True,
                                                      **attack_['args'])
            else:
                loader = DataLoader(dataset, batch_size=16, shuffle=True)
                attack = attack_['method']
                err, _ = training.epoch_adversarial(loader,
                                                    model,
                                                    attack,
                                                    device=device,
                                                    use_tqdm=True,
                                                    **attack_['args'])
            log('acc: {}'.format(1 - err))
        # Move the model off the GPU before loading the next one.
        model.to('cpu')
Code example #2
0
# Evaluate a pretrained ReLU ResNet18 checkpoint: clean error first, then
# PGD (L-inf, untargeted) and CW attacks on the same test loader.
# NOTE(review): assumes `resnet`, `training`, `attack`, and `test_loader`
# are defined earlier in the file — confirm against the full script.
device = torch.device('cuda:0')
model = resnet.ResNet18().to(device)
print("model loading --------Relu")
model.load_state_dict(torch.load('models/resnet18_cifar_80epochs.pth'))
# L-infinity perturbation budget (0.031 ≈ 8/255, the usual CIFAR setting).
eps = 0.031
model.eval()
# Clean test error (no attack).
test_err, test_loss = training.epoch(test_loader,
                                     model,
                                     device=device,
                                     use_tqdm=True)
print("test", test_err)
# PGD attack: 20 iterations, random start, step size alpha=0.003.
adv_err, adv_loss = training.epoch_adversarial(
    test_loader,
    model,
    attack=attack.pgd_linf_untargeted,
    device=device,
    num_iter=20,
    use_tqdm=True,
    epsilon=eps,
    randomize=True,
    alpha=0.003)
print("PGD:", adv_err)

# Carlini-Wagner attack, 20 iterations.
adv_err, adv_loss = training.epoch_adversarial(test_loader,
                                               model,
                                               attack=attack.CW,
                                               device=device,
                                               num_iter=20,
                                               use_tqdm=True)
print("CW:", adv_err)

adv_err, adv_loss = training.epoch_adversarial(test_loader,
Code example #3
0
File: mnist.py  Project: WrongWhp/kWTA-Activation
def train(config, cuda_id):
    """Train an MNIST model, optionally adversarially, then optionally
    finetune while lowering the k-WTA sparsity ratio.

    Each epoch logs train/test error plus three adversarial errors (PGD
    untargeted v1/v2 and most-likely-target PGD) to stdout and to
    ``config['logfilename']``, and checkpoints the model.

    Args:
        config: dict with keys 'model', 'train_batch_size',
            'test_batch_size', 'eps', 'alpha', 'lr', 'momentum',
            'logfilename', 'epoch', 'adv_train', 'n_test_adv', 'savename',
            and optionally 'finetune' (with 'lr', 'momentum', 'epoch',
            'sp_step', 'savepath').
        cuda_id: index of the CUDA device to train on.

    Raises:
        ValueError: if ``config['model']['name']`` is not one of
            'DNN', 'spDNN', 'CNN', 'spCNN'.
    """
    mnist_train = datasets.MNIST("./data",
                                 train=True,
                                 download=True,
                                 transform=transforms.ToTensor())
    mnist_test = datasets.MNIST("./data",
                                train=False,
                                download=True,
                                transform=transforms.ToTensor())
    train_loader = DataLoader(mnist_train,
                              batch_size=config['train_batch_size'],
                              shuffle=True)
    test_loader = DataLoader(mnist_test,
                             batch_size=config['test_batch_size'],
                             shuffle=True)

    eps = config['eps']
    alpha = config['alpha']
    device = torch.device('cuda:{}'.format(cuda_id))

    # Build the requested architecture.
    name = config['model']['name']
    if name == 'DNN':
        model = mnist_model.DNN(hidden_size=config['model']['hidden_size'])
    elif name == 'spDNN':
        model = mnist_model.SparseDNN(
            hidden_size=config['model']['hidden_size'],
            sp=config['model']['sp'],
            bias=True)
    elif name == 'CNN':
        model = mnist_model.MNIST_CNN(
            num_channels=config['model']['channels'],
            hidden_size=config['model']['hidden_size'])
    elif name == 'spCNN':
        model = mnist_model.SparseMNIST_CNN(
            sp1=config['model']['sp1'],
            sp2=config['model']['sp2'],
            func='vol',
            num_channels=config['model']['channels'],
            hidden_size=config['model']['hidden_size'])
    else:
        # Fail loudly on unknown architectures; include the name to ease debugging.
        raise ValueError('unknown model name: {}'.format(name))

    model.to(device)
    opt = optim.SGD(model.parameters(),
                    lr=config['lr'],
                    momentum=config['momentum'])

    logfilename = config['logfilename']

    with open(logfilename, 'a+') as logf:
        logf.write(json.dumps(config) + '\n')

    def train_one_epoch():
        # One training epoch: most-likely-target PGD adversarial training
        # when config['adv_train'] is truthy, plain SGD training otherwise.
        # Reads `opt` at call time, so it picks up the finetune optimizer
        # after it is rebound below.
        if config['adv_train']:
            return training.epoch_adversarial(
                train_loader,
                model,
                attack=attack.pgd_linf_untargeted_mostlikely,
                device=device,
                opt=opt,
                num_iter=20,
                use_tqdm=False,
                epsilon=eps,
                randomize=True,
                alpha=alpha)
        return training.epoch(train_loader, model, opt, device=device,
                              use_tqdm=False)

    def eval_adv(attack_fn):
        # Adversarial error on a subset of the test set with the shared
        # PGD hyperparameters (20 iters, random start, eps/alpha from config).
        return training.epoch_adversarial(
            test_loader,
            model,
            attack=attack_fn,
            device=device,
            num_iter=20,
            use_tqdm=False,
            epsilon=eps,
            randomize=True,
            alpha=alpha,
            n_test=config['n_test_adv'])

    def eval_all():
        # Clean test error plus the three adversarial variants.
        test_err, _ = training.epoch(test_loader, model, device=device,
                                     use_tqdm=False)
        adv_err1, _ = eval_adv(attack.pgd_linf_untargeted)
        adv_err2, _ = eval_adv(attack.pgd_linf_untargeted2)
        adv_err_ml, _ = eval_adv(attack.pgd_linf_untargeted_mostlikely)
        return test_err, adv_err1, adv_err2, adv_err_ml

    starttime = time.time()
    for i in range(config['epoch']):
        train_err, train_loss = train_one_epoch()
        test_err, adv_err1, adv_err2, adv_err_ml = eval_all()

        print('epoch: {}'.format(i))
        print('train err: {}, test err: {}, adv1 err: {}, adv2 err: {}'.format(
            train_err, test_err, adv_err1, adv_err2))
        print(
            'train err: {}, test err: {}, adv1 err: {}, adv2 err: {}, adv_ml err: {}'
            .format(train_err, test_err, adv_err1, adv_err2, adv_err_ml))

        # Elapsed/remaining time in minutes, extrapolated from epochs done.
        time_e = (time.time() - starttime) / 60
        time_r = (config['epoch'] - (i + 1)) * time_e / (i + 1)
        print('time elapse: {}, time remaining:{}'.format(time_e, time_r))
        with open(logfilename, "a+") as logf:
            logf.write('epoch: {}\n'.format(i))
            logf.write(
                'train err: {}, test err: {}, adv1 err: {}, adv2 err: {}, adv_ml err: {}, time_e:{}min\n'
                .format(train_err, test_err, adv_err1, adv_err2, adv_err_ml,
                        time_e))
        torch.save(model.state_dict(), config["savename"])

    if 'finetune' in config:
        # Finetune phase: progressively lower the k-WTA sparsity ratio `sp`
        # while continuing to train with a fresh SGD optimizer.
        activation_list = activation.append_activation_list(model, 1000)
        opt = optim.SGD(model.parameters(),
                        lr=config['finetune']['lr'],
                        momentum=config['finetune']['momentum'])
        sp = config['model']['sp1']
        for i in range(config['finetune']['epoch']):
            sp = sp - config['finetune']['sp_step']
            for act in activation_list:
                act.sr = sp

            train_err, train_loss = train_one_epoch()
            test_err, adv_err1, adv_err2, adv_err_ml = eval_all()

            print('epoch: {}'.format(i))
            print('current sp: {}'.format(sp))
            print(
                'train err: {}, test err: {}, adv1 err: {}, adv2 err: {}, adv_ml err: {}'
                .format(train_err, test_err, adv_err1, adv_err2, adv_err_ml))

            time_e = (time.time() - starttime) / 60
            time_r = (config['finetune']['epoch'] - (i + 1)) * time_e / (i + 1)
            print('time elapse: {}, time remaining:{}'.format(time_e, time_r))
            with open(logfilename, "a+") as logf:
                logf.write('epoch: {}\n'.format(i))
                # BUG FIX: this write was missing its trailing newline,
                # fusing the sp line with the following errors line.
                logf.write('current sp: {}\n'.format(sp))
                logf.write(
                    'train err: {}, test err: {}, adv1 err: {}, adv2 err: {}, adv_ml err: {}, time_e:{}min\n'
                    .format(train_err, test_err, adv_err1, adv_err2,
                            adv_err_ml, time_e))
            torch.save(model.state_dict(),
                       config["finetune"]["savepath"] + "_sp{}.pth".format(sp))
Code example #4
0
File: svhn.py  Project: WrongWhp/kWTA-Activation
def train(config, cuda_id):
    """Train a model on SVHN, optionally adversarially, with optional
    in-loop sparsity finetuning and stepped learning-rate decay.

    Each epoch logs train/test error plus the configured adversarial test
    errors to stdout and to ``config['logfilename']``, and checkpoints the
    model to ``config['savename']`` (plus per-sp checkpoints when finetuning).

    Args:
        config: dict with keys 'train_batch_size', 'test_batch_size', 'eps',
            'alpha', 'model', 'lr', 'momentum', 'logfilename', 'epoch',
            'epoch1'/'epoch1_lr', 'epoch2'/'epoch2_lr', 'test_attack',
            'n_test_adv', 'savename', optionally 'adv_train' and 'finetune'.
        cuda_id: index of the CUDA device to train on.

    Raises:
        NotImplementedError: if ``config['adv_train']['attack']`` is not one
            of 'untarg1', 'untarg2', 'ml', 'trade'.
    """

    norm_mean = 0
    norm_var = 1

    # Normalization is currently the identity (mean 0, var 1); crop/flip
    # augmentation is intentionally disabled for SVHN.
    transform_train = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((norm_mean, norm_mean, norm_mean),
                             (norm_var, norm_var, norm_var)),
    ])

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((norm_mean, norm_mean, norm_mean),
                             (norm_var, norm_var, norm_var)),
    ])
    svhn_train = datasets.SVHN("./data",
                               split='train',
                               download=True,
                               transform=transform_train)
    svhn_test = datasets.SVHN("./data",
                              split='test',
                              download=True,
                              transform=transform_test)
    train_loader = DataLoader(svhn_train,
                              batch_size=config['train_batch_size'],
                              shuffle=True)
    test_loader = DataLoader(svhn_test,
                             batch_size=config['test_batch_size'],
                             shuffle=True)

    eps = config['eps']
    alpha = config['alpha']
    device = torch.device('cuda:{}'.format(cuda_id))

    model = load_model(config['model'])

    model.to(device)
    opt = optim.SGD(model.parameters(),
                    lr=config['lr'],
                    momentum=config['momentum'])

    logfilename = config['logfilename']

    with open(logfilename, 'a+') as logf:
        logf.write(json.dumps(config) + '\n')

    # Single source of truth for the attack-tag -> attack-function mapping,
    # shared by adversarial training and by test-time evaluation.
    pgd_attacks = {
        'untarg1': attack.pgd_linf_untargeted,
        'untarg2': attack.pgd_linf_untargeted2,
        'ml': attack.pgd_linf_untargeted_mostlikely,
    }

    def train_adv(attack_fn):
        # One adversarial-training epoch with the shared PGD settings.
        return training.epoch_adversarial(
            train_loader,
            model,
            attack=attack_fn,
            device=device,
            opt=opt,
            num_iter=20,
            use_tqdm=False,
            epsilon=eps,
            randomize=True,
            alpha=alpha)

    def eval_adv(attack_fn):
        # Adversarial error on a subset of the test set.
        return training.epoch_adversarial(
            test_loader,
            model,
            attack=attack_fn,
            device=device,
            num_iter=20,
            use_tqdm=False,
            epsilon=eps,
            randomize=True,
            alpha=alpha,
            n_test=config['n_test_adv'])

    starttime = time.time()

    if 'finetune' in config:
        activation_list = activation.append_activation_list(model, 10000)
        sp = config['finetune']['init_sp']

    for i in range(config['epoch']):
        # During the finetune window, periodically lower the k-WTA
        # sparsity ratio on every registered activation.
        if 'finetune' in config:
            if (i >= config['finetune']['start_epoch']
                    and i % config['finetune']['adjust_epoch'] == 0):
                sp = round(sp - config['finetune']['sp_step'], 5)
                for act in activation_list:
                    act.sr = sp

        # Stepped learning-rate schedule.
        if i == config['epoch1']:
            for param_group in opt.param_groups:
                param_group['lr'] = config['epoch1_lr']

        if i == config['epoch2']:
            for param_group in opt.param_groups:
                param_group['lr'] = config['epoch2_lr']

        if 'adv_train' in config:
            tag = config['adv_train']['attack']
            if tag == 'trade':
                train_err, train_loss = training.epoch_trade(train_loader,
                                                             model,
                                                             opt=opt,
                                                             device=device,
                                                             step_size=alpha,
                                                             epsilon=eps,
                                                             perturb_steps=10,
                                                             beta=6)
            elif tag in pgd_attacks:
                train_err, train_loss = train_adv(pgd_attacks[tag])
            else:
                raise NotImplementedError
        else:
            train_err, train_loss = training.epoch(train_loader,
                                                   model,
                                                   opt,
                                                   device=device,
                                                   use_tqdm=False)

        test_err, test_loss = training.epoch(test_loader,
                                             model,
                                             device=device,
                                             use_tqdm=False)

        # BUG FIX: the 'ml' branch previously evaluated
        # attack.pgd_linf_untargeted2 (copy-paste error); it now uses
        # pgd_linf_untargeted_mostlikely via the shared dispatch table.
        adv_errs = []
        for tag in ('untarg1', 'untarg2', 'ml'):
            if tag in config['test_attack']:
                adv_err, adv_loss = eval_adv(pgd_attacks[tag])
                adv_errs.append(adv_err)

        print('epoch: {}'.format(i))
        print('train err: {:.5f}, test err: {:.5f}'.format(
            train_err, test_err))
        for adv_err in adv_errs:
            print('adv err: {:.5f}'.format(adv_err))

        # Elapsed/remaining time in minutes, extrapolated from epochs done.
        time_e = (time.time() - starttime) / 60
        time_r = (config['epoch'] - (i + 1)) * time_e / (i + 1)
        print('time elapse: {:.5f} min, time remaining:{:.5f} min'.format(
            time_e, time_r))
        with open(logfilename, "a+") as logf:
            logf.write('epoch: {}\n'.format(i))
            logf.write('train err: {:.5f}, test err: {:.5f}\n'.format(
                train_err, test_err))
            for adv_err in adv_errs:
                logf.write('adv err: {:.5f}\n'.format(adv_err))
            # BUG FIX: trailing newline was missing here, fusing the time
            # line with the next epoch's header in the log.
            logf.write('time elapse: {:.5f} min\n'.format(time_e))
        torch.save(model.state_dict(), config["savename"])

        if 'finetune' in config:
            print('current sp: {}'.format(sp))
            with open(logfilename, "a+") as logf:
                logf.write('current sp: {}\n'.format(sp))
            torch.save(model.state_dict(),
                       config["finetune"]["savepath"] + "_sp{}.pth".format(sp))
0
    print('which_AT:', args.which_AT)
    print('is_Wide:', args.is_Wide)

    if ep == 50:
        for param_group in opt.param_groups:
            param_group['lr'] = 0.01

    start_time = time.time()

    if not args.is_test:
        if args.which_AT == 'at':
            train_err, train_loss = training.epoch_adversarial(
                train_loader,
                model,
                opt=opt,
                attack=attack.pgd_linf_untargeted,
                device=device,
                num_iter=args.iters,
                epsilon=eps,
                randomize=True,
                alpha=2 / 255)
        elif args.which_AT == 'trades':
            train_err, train_loss = training.epoch_trade(train_loader,
                                                         model,
                                                         opt=opt,
                                                         device=device,
                                                         num_iter=args.iters,
                                                         epsilon=eps,
                                                         alpha=2 / 255,
                                                         beta=6.0)
        elif args.which_AT == 'nat':
            train_err, train_loss = training.epoch(train_loader,