Example #1
def code_test(model_path):
    import json
    import os

    import torch

    from base_model.cifar_resnet18 import cifar_resnet18
    from dataset import create_test_dataset

    ds_val = create_test_dataset()

    # e.g. model_path = '../exps/exp0/checkpoint.pth.tar'
    net = cifar_resnet18()
    if model_path is not None:
        checkpoint = torch.load(model_path)
        net.load_state_dict(checkpoint['state_dict'])
    net.cuda()

    # evalRoboustness is a helper defined elsewhere in the file.
    epoch = next(ds_val.epoch_generator())
    robustness = evalRoboustness(net, epoch)
    print(robustness)

    # Append the score next to the checkpoint; only possible when a path was given.
    if model_path is not None:
        val_res_path = os.path.join(os.path.dirname(model_path), 'r_results.txt')
        with open(val_res_path, 'a') as f:
            f.write('\n')
            json.dump({'Robustness': robustness}, f)
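Note: these examples assume `create_test_dataset()` returns a wrapper whose `epoch_generator()` yields one iterable of batches per epoch. Below is a minimal sketch of that assumed interface; the CIFAR-10 loader and the wrapper class are illustrative guesses, not the projects' actual code.

from torch.utils.data import DataLoader
from torchvision import datasets, transforms

class _EpochWrapper:
    # Wraps a DataLoader so each value from epoch_generator() is one epoch of batches.
    def __init__(self, loader):
        self.loader = loader

    def epoch_generator(self):
        while True:
            yield iter(self.loader)

def create_test_dataset(batch_size=128):
    ds = datasets.CIFAR10('./data', train=False, download=True,
                          transform=transforms.ToTensor())
    return _EpochWrapper(DataLoader(ds, batch_size=batch_size))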
Example #2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--resume',
        default='log/models/last.checkpoint',
        type=str,
        metavar='PATH',
        help='path to latest checkpoint (default: log/models/last.checkpoint)')
    parser.add_argument('-d', type=int, default=0, help='Which gpu to use')
    args = parser.parse_args()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    torch.backends.cudnn.benchmark = True

    net = create_network()
    net.to(device)

    ds_val = create_test_dataset(512)

    attack_method = config.create_evaluation_attack_method(device)

    if os.path.isfile(args.resume):
        load_checkpoint(args.resume, net)

    print('Evaluating')
    clean_acc, adv_acc = eval_one_epoch(net, ds_val, device, attack_method)
    print('clean acc -- {}     adv acc -- {}'.format(clean_acc, adv_acc))
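`load_checkpoint` is a project helper whose source is not shown. A plausible minimal version, assuming the checkpoint is a dict holding 'state_dict', optional optimizer/scheduler state, and an epoch counter (all key names are assumptions):

import torch

def load_checkpoint(path, net, optimizer=None, lr_scheduler=None):
    # Load to CPU first so the file opens regardless of which device saved it.
    ckpt = torch.load(path, map_location='cpu')
    net.load_state_dict(ckpt['state_dict'])
    if optimizer is not None and 'optimizer' in ckpt:
        optimizer.load_state_dict(ckpt['optimizer'])
    if lr_scheduler is not None and 'lr_scheduler' in ckpt:
        lr_scheduler.load_state_dict(ckpt['lr_scheduler'])
    return ckpt.get('epoch', 0)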
Example #3
def process_single_epoch():
    print('**************')
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', type=int, default=0, help='Which gpu to use')
    args = parser.parse_args()


    DEVICE = torch.device('cuda:{}'.format(args.d))
    torch.backends.cudnn.benchmark = True

    net = create_network()
    net.to(DEVICE)

    ds_val = create_test_dataset(10000)

    AttackMethod = config.create_evaluation_attack_method(DEVICE)

    filename = '../ckpts/binary-epoch31.checkpoint'
    print(filename)
    if os.path.isfile(filename):
        load_checkpoint(filename, net)

    print('Evaluating')
    clean_acc, adv_acc = my_eval_one_epoch(net, ds_val, DEVICE, AttackMethod)
    print('clean acc -- {}     adv acc -- {}'.format(clean_acc, adv_acc))
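`eval_one_epoch` / `my_eval_one_epoch` also come from the surrounding project. A rough sketch of what they compute, assuming the attack object exposes `attack(net, x, y)` returning perturbed inputs and that the dataset iterates as `(x, y)` batches; both interface details are guesses:

import torch

def eval_one_epoch(net, loader, device, attack):
    net.eval()
    clean_correct = adv_correct = total = 0
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        with torch.no_grad():
            clean_correct += (net(x).argmax(dim=1) == y).sum().item()
        # Crafting adversarial examples needs input gradients, so no no_grad here.
        x_adv = attack.attack(net, x, y)
        with torch.no_grad():
            adv_correct += (net(x_adv).argmax(dim=1) == y).sum().item()
        total += y.size(0)
    return clean_correct / total, adv_correct / total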
Example #4
def main():
    DEVICE = torch.device('cuda:{}'.format(args.d))
    torch.backends.cudnn.benchmark = True

    net = create_network()
    net.to(DEVICE)
    criterion = config.create_loss_function().to(DEVICE)

    optimizer = config.create_optimizer(net.parameters())
    lr_scheduler = config.create_lr_scheduler(optimizer)

    ds_train = create_train_dataset(args.batch_size)
    ds_val = create_test_dataset(args.batch_size)

    TrainAttack = config.create_attack_method(DEVICE)
    EvalAttack = config.create_evaluation_attack_method(DEVICE)

    now_epoch = 0

    if args.auto_continue:
        args.resume = os.path.join(config.model_dir, 'last.checkpoint')
    if args.resume is not None and os.path.isfile(args.resume):
        now_epoch = load_checkpoint(args.resume, net, optimizer, lr_scheduler)

    while True:
        # Stop after config.num_epochs epochs; the original `>` comparison
        # let the loop train one extra epoch.
        if now_epoch >= config.num_epochs:
            break
        now_epoch = now_epoch + 1

        descrip_str = 'Training epoch:{}/{} -- lr:{}'.format(
            now_epoch, config.num_epochs,
            lr_scheduler.get_lr()[0])
        train_one_epoch(net,
                        ds_train,
                        optimizer,
                        criterion,
                        DEVICE,
                        descrip_str,
                        TrainAttack,
                        adv_coef=args.adv_coef)
        if config.eval_interval > 0 and now_epoch % config.eval_interval == 0:
            eval_one_epoch(net, ds_val, DEVICE, EvalAttack)

        lr_scheduler.step()

        save_checkpoint(now_epoch,
                        net,
                        optimizer,
                        lr_scheduler,
                        file_name=os.path.join(
                            config.model_dir,
                            'epoch-{}.checkpoint'.format(now_epoch)))
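`save_checkpoint` would be the counterpart of the `load_checkpoint` sketch above; a minimal version under the same assumed key names:

import torch

def save_checkpoint(epoch, net, optimizer, lr_scheduler, file_name):
    # Persist everything needed to resume: epoch counter, weights, and
    # optimizer/scheduler state, under the keys load_checkpoint expects.
    torch.save({'epoch': epoch,
                'state_dict': net.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict()},
               file_name)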
Example #5
def main():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    torch.backends.cudnn.benchmark = True

    net = create_network()
    net.to(device)
    criterion = config.create_loss_function().to(device)

    optimizer = config.create_optimizer(net.parameters())
    lr_scheduler = config.create_lr_scheduler(optimizer)

    ds_train = create_train_dataset(args.batch_size)
    ds_val = create_test_dataset(args.batch_size)

    train_attack = config.create_attack_method(device)
    eval_attack = config.create_evaluation_attack_method(device)

    now_epoch = 0

    if args.auto_continue:
        args.resume = os.path.join(config.model_dir, 'last.checkpoint')
    if args.resume is not None and os.path.isfile(args.resume):
        now_epoch = load_checkpoint(args.resume, net, optimizer, lr_scheduler)

    for i in range(now_epoch, config.num_epochs):
        descrip_str = 'Training epoch:{}/{} -- lr:{}'.format(i, config.num_epochs,
                                                             lr_scheduler.get_last_lr()[0])
        train_one_epoch(net, ds_train, optimizer, criterion, device,
                        descrip_str, train_attack, adv_coef=args.adv_coef)
        if config.eval_interval > 0 and i % config.eval_interval == 0:
            eval_one_epoch(net, ds_val, device, eval_attack)

        lr_scheduler.step()

    # Save only the final checkpoint. Guard against the loop not running at all
    # (fully trained on resume), in which case `i` would be unbound.
    if now_epoch < config.num_epochs:
        save_checkpoint(i, net, optimizer, lr_scheduler,
                        file_name=os.path.join(config.model_dir, 'epoch-{}.checkpoint'.format(i)))
Example #6
if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument('--resume',
                        type=str,
                        default='../exps/tradeoff.eps8/checkpoint.pth.tar')
    parser.add_argument('-d', type=int, default=3)

    args = parser.parse_args()

    net_name = args.resume.split('/')[-2]
    print(net_name)
    path = os.path.join('../SmoothRes', net_name)
    if not os.path.exists(path):
        os.mkdir(path)
    net = models.resnet18(pretrained=False)
    # Assigning net.fc.out_features alone does not resize the weights;
    # replace the head so it really has 200 outputs.
    net.fc = torch.nn.Linear(net.fc.in_features, 200)

    net.load_state_dict(torch.load(args.resume)['state_dict'])
    DEVICE = torch.device('cuda:{}'.format(args.d))

    net.to(DEVICE)
    dl = create_test_dataset(32)

    #xz_test(dl, 1,net, DEVICE)
    #test_model(net, dl)
    #l1_for_without_smooth(net, dl, DEVICE)
    #l1_for_with_smooth(net, dl, DEVICE)
    get_result(net, dl, DEVICE, net_name)
Example #7
lr_scheduler = config.create_lr_scheduler(optimizer)

Hamiltonian_func = Hamiltonian(net.layer_one, config.weight_decay)
layer_one_optimizer = optim.SGD(net.layer_one.parameters(),
                                lr=lr_scheduler.get_lr()[0],
                                momentum=0.9,
                                weight_decay=5e-4)
layer_one_optimizer_lr_scheduler = optim.lr_scheduler.MultiStepLR(
    layer_one_optimizer, milestones=[15, 19], gamma=0.1)
LayerOneTrainer = FastGradientLayerOneTrainer(Hamiltonian_func,
                                              layer_one_optimizer,
                                              config.inner_iters, config.sigma,
                                              config.eps)

ds_train = create_train_dataset(args.batch_size)
ds_val = create_test_dataset(args.batch_size)

EvalAttack = config.create_evaluation_attack_method(DEVICE)

now_epoch = 0

if args.auto_continue:
    args.resume = os.path.join(config.model_dir, 'last.checkpoint')
if args.resume is not None and os.path.isfile(args.resume):
    now_epoch = load_checkpoint(args.resume, net, optimizer, lr_scheduler)

now_train_time = 0
while True:
    if now_epoch >= config.num_epochs:  # `>` here would train one extra epoch
        break
    now_epoch = now_epoch + 1
Example #8
File: eval.py  Project: a1600012888/GradTV
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            check_point = torch.load(args.resume)
            net.load_state_dict(check_point['state_dict'])

            print('Model loaded from {} with metrics:'.format(args.resume))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

        base_path = os.path.split(args.resume)[0]
    else:
        base_path = './'

    net.eval()
    criterion = torch.nn.CrossEntropyLoss().cuda()
    ds_val = create_test_dataset()
    ds_b = create_benchmark_dataset()

    if args.val:
        valacc, valcros, valtv, l1 = val_one_epoch(net, ds_val, criterion, clock)
        val_file = os.path.join(base_path, 'val.txt')
        with open(val_file, 'w') as f:
            print([valacc, valcros, valtv, l1], file=f)
    if args.bench:

        results_path = os.path.join(base_path, 'benchmark_results', str(clock.epoch))
        test_on_benchmark(net, ds_b, criterion, results_path)

    if args.smooth:

        results_path = os.path.join(base_path, 'smooth')
Example #9
File: gen_result.py  Project: bfshi/AT-CNN
    args = parser.parse_args()


    net_name = args.resume.split('/')[-2]
    print(net_name)
    path = os.path.join('../SmoothRes', net_name)
    if not os.path.exists(path):
        os.mkdir(path)
    net = models.resnet18(pretrained=False)
    # As in the previous example, replace the head instead of assigning
    # out_features, which would leave the weight matrix at its original shape.
    net.fc = torch.nn.Linear(net.fc.in_features, 200)

    net.load_state_dict(torch.load(args.resume)['state_dict'])
    DEVICE = torch.device('cuda:{}'.format(args.d))

    net.to(DEVICE)
    dl_teacher = create_test_dataset(32)
    if args.p is None and args.b is None:
        dl = create_test_dataset(32)

    if args.b is not None and args.p is None:
        dl = create_brighness_test_dataset(batch_size=32,
                                           root='./', bright_level=args.b)

    if args.p is not None and args.b is None:
        dl = create_saturation_test_dataset(32, root='./', saturation_level=args.p)

    if args.k is not None:
        dl = create_patch_test_dataset(32, './', args.k)

    # style
    #dl = create_style_test_dataset(32)
Example #10
             linestyle='-.',
             label='YOPO Robust Error')
    plt.legend(loc='upper left')
    plt.show()


net = create_network()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(),
                      lr=1e-2,
                      momentum=0.95,
                      weight_decay=1e-4)

# prepare dataset
ds_train = create_train_dataset(batch_size)
ds_val = create_test_dataset(batch_size)


def visualize(time_arr, pgd_clean_err, pgd_robust_err):
    fig = plt.figure()
    ax1 = fig.add_subplot(111)

    ax1.plot(time_arr, pgd_clean_err, color='red', label='PGD Clean Error')
    ax1.plot(time_arr,
             pgd_robust_err,
             color='red',
             linestyle='-.',
             label='PGD Robust Error')
    plt.legend(loc='upper left')
    plt.show()
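A quick way to exercise `visualize` with synthetic numbers (the arrays below are fabricated for the demo, not measurements, and matplotlib is assumed to be imported as `plt` as the snippet implies):

import numpy as np

time_arr = np.linspace(0, 60, 20)                    # fake wall-clock minutes
pgd_clean_err = 0.6 * np.exp(-time_arr / 20) + 0.05  # fake decaying error curves
pgd_robust_err = 0.9 * np.exp(-time_arr / 30) + 0.35
visualize(time_arr, pgd_clean_err, pgd_robust_err)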
Example #11
    window_size               = int(arguments['<window_size>'])
    smoothing_window_size     = int(arguments['<smoothing_window_size>'])
    pickled_model_file        = arguments['<pickled_model_file>']
    output_file               = arguments['<output_file>']
    original_pssm             = arguments['--original']
    exp_pssm                  = arguments['--exp']
    smoothed_pssm             = arguments['--smoothed']
    exp_smoothed_pssm         = arguments['--exp_smoothed']
    AAindex                   = arguments['--aaindex']
    secondary_structure       = arguments['--secondary_structure']
    undersampling             = False
    shuffle                   = True

    # Normalizing AAindex.
    feature.normalize_AAindex()

    data_filepath = filepath.DataFilepath(data_path_file)
    protein_holder = create_ProteinHolder(smoothing_window_size, data_filepath)
    test_dataset, proteinid_index_list = dataset.create_test_dataset(protein_holder, window_size, original_pssm=original_pssm,
                                                       exp_pssm=exp_pssm, smoothed_pssm=smoothed_pssm, exp_smoothed_pssm=exp_smoothed_pssm,
                                                       AAindex=AAindex, secondary_structure=secondary_structure, )
    with open(pickled_model_file, 'rb') as pkl_file:
        clf = pickle.load(pkl_file)
    decision_values = clf.decision_function(test_dataset)
    # decision_function returns a 2-D array for one-vs-rest classifiers;
    # flatten it to one score per sample.
    if isinstance(decision_values[0], (list, numpy.ndarray)):
        decision_values = [x[0] for x in decision_values]
    predicted_labels = clf.predict(test_dataset)

    write_to_output_file(output_file, decision_values, predicted_labels, proteinid_index_list)
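The `decision_values[0]` check exists because scikit-learn's `decision_function` returns a 1-D array for binary classifiers but a 2-D array for one-vs-rest multiclass models. A self-contained illustration with toy data (unrelated to the PSSM features above):

import numpy as np
from sklearn.svm import LinearSVC

X = np.random.rand(12, 4)
print(LinearSVC().fit(X, np.tile([0, 1], 6)).decision_function(X).shape)     # (12,)
print(LinearSVC().fit(X, np.tile([0, 1, 2], 4)).decision_function(X).shape)  # (12, 3)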
Example #12
            break

    labels = np.array(labels)
    labels = labels.reshape(-1)

    np.savetxt(os.path.join(p, 'label.txt'), labels)


if __name__ == '__main__':

    root = '../../data/benchmark/'
    names = [
        'val', 'style', 'sat1024', 'sat1', 'sat64', 'sat16', 'sat8', 'sat4',
        'p2', 'p4', 'p8'
    ]
    dl_val = create_test_dataset(batch_size=64)
    dl_style = create_style_test_dataset(batch_size=64)
    dl_sat1024 = create_saturation_test_dataset(batch_size=64,
                                                saturation_level=1024)
    dl_sat1 = create_saturation_test_dataset(batch_size=64,
                                             saturation_level=1)
    dl_sat64 = create_saturation_test_dataset(batch_size=64,
                                              saturation_level=64)
    dl_sat16 = create_saturation_test_dataset(batch_size=64,
                                              saturation_level=16)
    dl_sat8 = create_saturation_test_dataset(batch_size=64, saturation_level=8)
    dl_sat4 = create_saturation_test_dataset(batch_size=64, saturation_level=4)
    dl_p2 = create_patch_test_dataset(64, k=2)
    dl_p4 = create_patch_test_dataset(64, k=4)
    dl_p8 = create_patch_test_dataset(64, k=8)
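The snippet ends before the loaders are consumed; presumably they are paired with `names` and evaluated in turn, along these lines (the per-dataset `evaluate` call is a placeholder, not shown in the source):

loaders = [dl_val, dl_style, dl_sat1024, dl_sat1, dl_sat64, dl_sat16,
           dl_sat8, dl_sat4, dl_p2, dl_p4, dl_p8]
for name, dl in zip(names, loaders):
    out_dir = os.path.join(root, name)
    os.makedirs(out_dir, exist_ok=True)
    # evaluate(net, dl, out_dir)  # hypothetical: the real per-dataset step is not shown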