Example #1
def main():
    DEVICE = torch.device('cuda:{}'.format(args.d))
    torch.backends.cudnn.benchmark = True

    net = create_network()
    net.to(DEVICE)
    criterion = config.create_loss_function().to(DEVICE)

    optimizer = config.create_optimizer(net.parameters())
    lr_scheduler = config.create_lr_scheduler(optimizer)

    ds_train = create_train_dataset(args.batch_size)
    ds_val = create_test_dataset(args.batch_size)

    TrainAttack = config.create_attack_method(DEVICE)
    EvalAttack = config.create_evaluation_attack_method(DEVICE)

    now_epoch = 0

    if args.auto_continue:
        args.resume = os.path.join(config.model_dir, 'last.checkpoint')
    if args.resume is not None and os.path.isfile(args.resume):
        # restores net/optimizer/scheduler state and returns the last finished epoch
        now_epoch = load_checkpoint(args.resume, net, optimizer, lr_scheduler)

    while True:
        if now_epoch >= config.num_epochs:  # stop once all epochs are done
            break
        now_epoch = now_epoch + 1

        descrip_str = 'Training epoch:{}/{} -- lr:{}'.format(
            now_epoch, config.num_epochs,
            lr_scheduler.get_last_lr()[0])  # current scheduled learning rate
        train_one_epoch(net,
                        ds_train,
                        optimizer,
                        criterion,
                        DEVICE,
                        descrip_str,
                        TrainAttack,
                        adv_coef=args.adv_coef)
        if config.eval_interval > 0 and now_epoch % config.eval_interval == 0:
            eval_one_epoch(net, ds_val, DEVICE, EvalAttack)

        lr_scheduler.step()

        save_checkpoint(now_epoch,
                        net,
                        optimizer,
                        lr_scheduler,
                        file_name=os.path.join(
                            config.model_dir,
                            'epoch-{}.checkpoint'.format(now_epoch)))
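
Example #1 relies on save_checkpoint/load_checkpoint helpers that the listing does not show. A minimal sketch of what they might look like, assuming a single-file checkpoint that bundles the epoch counter with the three state dicts (the key names are an assumption, not the repo's actual layout):

import torch

def save_checkpoint(epoch, net, optimizer, lr_scheduler, file_name):
    # bundle everything needed to resume training into one file
    torch.save({'epoch': epoch,
                'net': net.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict()}, file_name)

def load_checkpoint(file_name, net, optimizer, lr_scheduler):
    # restore all state and report the last finished epoch
    ckpt = torch.load(file_name, map_location='cpu')
    net.load_state_dict(ckpt['net'])
    optimizer.load_state_dict(ckpt['optimizer'])
    lr_scheduler.load_state_dict(ckpt['lr_scheduler'])
    return ckpt['epoch']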
Example #2
def main():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    torch.backends.cudnn.benchmark = True

    net = create_network()
    net.to(device)
    criterion = config.create_loss_function().to(device)

    optimizer = config.create_optimizer(net.parameters())
    lr_scheduler = config.create_lr_scheduler(optimizer)

    ds_train = create_train_dataset(args.batch_size)
    ds_val = create_test_dataset(args.batch_size)

    train_attack = config.create_attack_method(device)
    eval_attack = config.create_evaluation_attack_method(device)

    now_epoch = 0

    if args.auto_continue:
        args.resume = os.path.join(config.model_dir, 'last.checkpoint')
    if args.resume is not None and os.path.isfile(args.resume):
        now_epoch = load_checkpoint(args.resume, net, optimizer, lr_scheduler)

    for i in range(now_epoch, config.num_epochs):

        descrip_str = 'Training epoch:{}/{} -- lr:{}'.format(i, config.num_epochs,
                                                             lr_scheduler.get_last_lr()[0])
        train_one_epoch(net, ds_train, optimizer, criterion, device,
                        descrip_str, train_attack, adv_coef=args.adv_coef)
        if config.eval_interval > 0 and i % config.eval_interval == 0:
            eval_one_epoch(net, ds_val, device, eval_attack)

        lr_scheduler.step()

        # checkpoint after every epoch, matching the per-epoch file name
        save_checkpoint(i, net, optimizer, lr_scheduler,
                        file_name=os.path.join(config.model_dir, 'epoch-{}.checkpoint'.format(i)))
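
Both versions delegate the actual work to train_one_epoch, which is not part of the listing. A rough sketch of the adversarial training step it presumably performs, assuming the attack object exposes an attack(net, x, y) method that returns perturbed inputs and that adv_coef weights the adversarial loss term (names and signature here are assumptions):

def train_one_epoch(net, loader, optimizer, criterion, device,
                    descrip_str, attack, adv_coef=1.0):
    # descrip_str would normally label a progress bar; unused in this sketch
    net.train()
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        x_adv = attack.attack(net, x, y)  # craft adversarial examples for the batch
        optimizer.zero_grad()
        loss = criterion(net(x), y) + adv_coef * criterion(net(x_adv), y)
        loss.backward()
        optimizer.step()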
Example #3
optimizer = config.create_optimizer(net.other_layers.parameters())
lr_scheduler = config.create_lr_scheduler(optimizer)

Hamiltonian_func = Hamiltonian(net.layer_one, config.weight_decay)
layer_one_optimizer = optim.SGD(net.layer_one.parameters(),
                                lr=lr_scheduler.get_last_lr()[0],  # current scheduled rate
                                momentum=0.9,
                                weight_decay=5e-4)
layer_one_optimizer_lr_scheduler = optim.lr_scheduler.MultiStepLR(
    layer_one_optimizer, milestones=[15, 19], gamma=0.1)
LayerOneTrainer = FastGradientLayerOneTrainer(Hamiltonian_func,
                                              layer_one_optimizer,
                                              config.inner_iters, config.sigma,
                                              config.eps)

ds_train = create_train_dataset(args.batch_size)
ds_val = create_test_dataset(args.batch_size)

EvalAttack = config.create_evaluation_attack_method(DEVICE)

now_epoch = 0

if args.auto_continue:
    args.resume = os.path.join(config.model_dir, 'last.checkpoint')
if args.resume is not None and os.path.isfile(args.resume):
    now_epoch = load_checkpoint(args.resume, net, optimizer, lr_scheduler)

now_train_time = 0
while True:
    if now_epoch >= config.num_epochs:
        break
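
The Hamiltonian built in Example #3 is the YOPO coupling between the co-state p and the first layer's output, roughly H(x, p) = <p, layer_one(x)>. A minimal sketch under that reading (storing the weight-decay coefficient without using it is an assumption about the unseen class):

import torch
import torch.nn as nn

class Hamiltonian(nn.Module):
    def __init__(self, layer, reg_coef=0.0):
        super().__init__()
        self.layer = layer          # the network's first layer
        self.reg_coef = reg_coef    # weight-decay coefficient, kept for completeness

    def forward(self, x, p):
        # inner product between the co-state and the layer output, summed over the batch
        return torch.sum(self.layer(x) * p)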
Example #4
             color='g',
             linestyle='-.',
             label='YOPO Robust Error')
    plt.legend(loc='upper left')
    plt.show()


net = create_network()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(),
                      lr=1e-2,
                      momentum=0.95,
                      weight_decay=1e-4)

# prepare dataset
ds_train = create_train_dataset(batch_size)
ds_val = create_test_dataset(batch_size)


def visualize(time_arr, pgd_clean_err, pgd_robust_err):
    fig = plt.figure()
    ax1 = fig.add_subplot(111)

    ax1.plot(time_arr, pgd_clean_err, color='red', label='PGD Clean Error')
    ax1.plot(time_arr,
             pgd_robust_err,
             color='red',
             linestyle='-.',
             label='PGD Robust Error')
    plt.legend(loc='upper left')
    plt.show()
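
For reference, visualize only needs three equal-length sequences; a toy call with made-up numbers:

# purely illustrative data: minutes of training vs. error rates
time_arr = [0, 10, 20, 30]
pgd_clean_err = [0.90, 0.40, 0.25, 0.20]
pgd_robust_err = [0.95, 0.70, 0.55, 0.50]
visualize(time_arr, pgd_clean_err, pgd_robust_err)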
Example #5
learning_rate_policy = [[80, 0.01], [50, 0.001], [50, 0.0001]]
get_learning_rate = MultiStageLearningRatePolicy(learning_rate_policy)


def adjust_learning_rate(optimizer, epoch):
    # apply the rate scheduled for this epoch to every param group
    lr = get_learning_rate(epoch)
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr


torch.backends.cudnn.benchmark = True
ds_train = create_train_dataset()

ds_val = create_test_dataset()

net = cifar_resnet18(expansion=args.wide)
net.cuda()
criterion = nn.CrossEntropyLoss().cuda()
optimizer = optim.SGD(net.parameters(),
                      lr=get_learning_rate(0),
                      momentum=0.9,
                      weight_decay=args.weight_decay)

# l-inf PGD attack: eps is the perturbation budget, sigma the per-step size
PgdAttack = IPGD(eps=args.eps,
                 sigma=args.eps // 2,
                 nb_iter=args.iter,
                 norm=np.inf)
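
MultiStageLearningRatePolicy itself is not shown; from the [num_epochs, lr] pairs above it presumably holds each rate for the given number of epochs before moving to the next stage. A minimal sketch under that assumption:

class MultiStageLearningRatePolicy:
    def __init__(self, stages):
        # stages: list of [num_epochs, lr] pairs, applied in order
        self.stages = stages

    def __call__(self, epoch):
        start = 0
        for num_epochs, lr in self.stages:
            if epoch < start + num_epochs:
                return lr
            start += num_epochs
        return self.stages[-1][1]  # past the last stage: keep the final rate

With the policy above this yields 0.01 for epochs 0-79, 0.001 for epochs 80-129, and 0.0001 afterwards.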