Example #1
def main():
    DEVICE = torch.device('cuda:{}'.format(args.d))
    torch.backends.cudnn.benchmark = True

    net = create_network()
    net.to(DEVICE)
    criterion = config.create_loss_function().to(DEVICE)

    optimizer = config.create_optimizer(net.parameters())
    lr_scheduler = config.create_lr_scheduler(optimizer)

    ds_train = create_train_dataset(args.batch_size)
    ds_val = create_test_dataset(args.batch_size)

    TrainAttack = config.create_attack_method(DEVICE)
    EvalAttack = config.create_evaluation_attack_method(DEVICE)

    now_epoch = 0

    if args.auto_continue:
        args.resume = os.path.join(config.model_dir, 'last.checkpoint')
    if args.resume is not None and os.path.isfile(args.resume):
        now_epoch = load_checkpoint(args.resume, net, optimizer, lr_scheduler)

    while True:
        if now_epoch > config.num_epochs:
            break
        now_epoch = now_epoch + 1

        descrip_str = 'Training epoch:{}/{} -- lr:{}'.format(
            now_epoch, config.num_epochs,
            lr_scheduler.get_lr()[0])
        train_one_epoch(net,
                        ds_train,
                        optimizer,
                        criterion,
                        DEVICE,
                        descrip_str,
                        TrainAttack,
                        adv_coef=args.adv_coef)
        if config.eval_interval > 0 and now_epoch % config.eval_interval == 0:
            eval_one_epoch(net, ds_val, DEVICE, EvalAttack)

        lr_scheduler.step()

        save_checkpoint(now_epoch,
                        net,
                        optimizer,
                        lr_scheduler,
                        file_name=os.path.join(
                            config.model_dir,
                            'epoch-{}.checkpoint'.format(now_epoch)))
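The loops on this page depend on save_checkpoint / load_checkpoint helpers that the snippets do not show. A minimal sketch consistent with how they are called here, where load_checkpoint returns the stored epoch number and the extra 'last.checkpoint' copy is only an assumption to make --auto_continue work, could look like this:

import os

import torch


def save_checkpoint(epoch, net, optimizer, lr_scheduler, file_name):
    # bundle everything needed to resume training into one file
    state = {
        'epoch': epoch,
        'state_dict': net.state_dict(),
        'optimizer': optimizer.state_dict(),
        'lr_scheduler': lr_scheduler.state_dict(),
    }
    torch.save(state, file_name)
    # assumption: also keep a 'last.checkpoint' copy so --auto_continue can find it
    torch.save(state, os.path.join(os.path.dirname(file_name), 'last.checkpoint'))


def load_checkpoint(file_name, net, optimizer=None, lr_scheduler=None):
    # restore model weights and, when given, optimizer/scheduler state
    state = torch.load(file_name, map_location='cpu')
    net.load_state_dict(state['state_dict'])
    if optimizer is not None:
        optimizer.load_state_dict(state['optimizer'])
    if lr_scheduler is not None:
        lr_scheduler.load_state_dict(state['lr_scheduler'])
    return state['epoch']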
Example #2
def main():
    model = create_network().to(device)

    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)

    EvalAttack = config.create_evaluation_attack_method(device)

    now_train_time = 0
    for epoch in range(1, args.epochs + 1):
        # adjust learning rate for SGD
        adjust_learning_rate(optimizer, epoch)

        s_time = time()
        descrip_str = 'Training epoch: {}/{}'.format(epoch, args.epochs)
        # adversarial training
        train(args, model, device, train_loader, optimizer, epoch, descrip_str)
        now_train_time += time() - s_time

        acc, advacc = eval_one_epoch(model, test_loader, device, EvalAttack)

        # save checkpoint
        if epoch % args.save_freq == 0:
            torch.save(
                model.state_dict(),
                os.path.join(config.model_dir,
                             'model-wideres-epoch{}.pt'.format(epoch)))
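The adjust_learning_rate call above is not defined in the snippet. A hedged sketch of a step-decay version, where the decay points (x0.1 at epochs 75 and 90) are an assumption rather than the project's actual schedule, would be:

def adjust_learning_rate(optimizer, epoch):
    # step decay: start from args.lr and shrink it at fixed epochs (assumed milestones)
    lr = args.lr
    if epoch >= 75:
        lr *= 0.1
    if epoch >= 90:
        lr *= 0.1
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr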
Example #3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--resume',
        default='log/models/last.checkpoint',
        type=str,
        metavar='PATH',
        help='path to latest checkpoint (default: log/models/last.checkpoint)')
    parser.add_argument('-d', type=int, default=0, help='Which gpu to use')
    args = parser.parse_args()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    torch.backends.cudnn.benchmark = True

    net = create_network()
    net.to(device)

    ds_val = create_test_dataset(512)

    attack_method = config.create_evaluation_attack_method(device)

    if os.path.isfile(args.resume):
        load_checkpoint(args.resume, net)

    print('Evaluating')
    clean_acc, adv_acc = eval_one_epoch(net, ds_val, device, attack_method)
    print('clean acc -- {}     adv acc -- {}'.format(clean_acc, adv_acc))
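eval_one_epoch itself is not shown on this page. A minimal sketch that matches how it is called and what it returns, where the attack_method.attack(net, images, labels) interface is an assumption about the attack object, might be:

import torch


def eval_one_epoch(net, batch_generator, device, attack_method=None):
    # report clean accuracy and, when an attack is given, adversarial accuracy
    net.eval()
    clean_correct, adv_correct, total = 0, 0, 0
    for images, labels in batch_generator:
        images, labels = images.to(device), labels.to(device)
        total += labels.size(0)
        with torch.no_grad():
            clean_correct += (net(images).argmax(dim=1) == labels).sum().item()
        if attack_method is not None:
            # the attack needs gradients w.r.t. the inputs, so no torch.no_grad() here
            adv_images = attack_method.attack(net, images, labels)
            with torch.no_grad():
                adv_correct += (net(adv_images).argmax(dim=1) == labels).sum().item()
    clean_acc = 100.0 * clean_correct / total
    adv_acc = 100.0 * adv_correct / total if attack_method is not None else 0.0
    return clean_acc, adv_acc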
Example #4
def main():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    torch.backends.cudnn.benchmark = True

    net = create_network()
    net.to(device)
    criterion = config.create_loss_function().to(device)

    optimizer = config.create_optimizer(net.parameters())
    lr_scheduler = config.create_lr_scheduler(optimizer)

    ds_train = create_train_dataset(args.batch_size)
    ds_val = create_test_dataset(args.batch_size)

    train_attack = config.create_attack_method(device)
    eval_attack = config.create_evaluation_attack_method(device)

    now_epoch = 0

    if args.auto_continue:
        args.resume = os.path.join(config.model_dir, 'last.checkpoint')
    if args.resume is not None and os.path.isfile(args.resume):
        now_epoch = load_checkpoint(args.resume, net, optimizer, lr_scheduler)

    for i in range(now_epoch, config.num_epochs):
        descrip_str = 'Training epoch:{}/{} -- lr:{}'.format(i, config.num_epochs,
                                                             lr_scheduler.get_last_lr()[0])
        train_one_epoch(net, ds_train, optimizer, criterion, device,
                        descrip_str, train_attack, adv_coef=args.adv_coef)
        if config.eval_interval > 0 and i % config.eval_interval == 0:
            eval_one_epoch(net, ds_val, device, eval_attack)

        lr_scheduler.step()

    save_checkpoint(i, net, optimizer, lr_scheduler,
                    file_name=os.path.join(config.model_dir, 'epoch-{}.checkpoint'.format(i)))
Example #5
        break
    now_epoch = now_epoch + 1

    descrip_str = 'Training epoch:{}/{} -- lr:{}'.format(
        now_epoch, config.num_epochs,
        lr_scheduler.get_lr()[0])
    s_time = time.time()
    acc, yofoacc = train_one_epoch(net, ds_train, optimizer, criterion,
                                   LayerOneTrainer, config.K, DEVICE,
                                   descrip_str)
    now_train_time = now_train_time + time.time() - s_time
    tb_train_dic = {'Acc': acc, 'YofoAcc': yofoacc}
    print(tb_train_dic)
    writer.add_scalars('Train', tb_train_dic, now_epoch)
    if config.val_interval > 0 and now_epoch % config.val_interval == 0:
        acc, advacc = eval_one_epoch(net, ds_val, DEVICE, EvalAttack)
        tb_val_dic = {'Acc': acc, 'AdvAcc': advacc}
        writer.add_scalars('Val', tb_val_dic, now_epoch)
        tb_val_dic['time'] = now_train_time
        log_str = json.dumps(tb_val_dic)
        with open('time.log', 'a') as f:
            f.write(log_str + '\n')

    lr_scheduler.step()
    lyaer_one_optimizer_lr_scheduler.step()
    save_checkpoint(now_epoch,
                    net,
                    optimizer,
                    lr_scheduler,
                    file_name=os.path.join(
                        config.model_dir,
                        'epoch-{}.checkpoint'.format(now_epoch)))
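The writer.add_scalars calls above assume a TensorBoard SummaryWriter created elsewhere in the script; a minimal setup (the log directory name is a placeholder) would be:

from torch.utils.tensorboard import SummaryWriter

# placeholder log directory; the original script may configure this differently
writer = SummaryWriter(log_dir='runs/yopo')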
Example #6
import argparse
import os

import torch

parser = argparse.ArgumentParser()
parser.add_argument(
    '--resume',
    default='../ckpts/full-epoch32.checkpoint',
    type=str,
    metavar='PATH',
    help='path to latest checkpoint (default: ../ckpts/full-epoch32.checkpoint)'
)
parser.add_argument('-d', type=int, default=0, help='Which gpu to use')
args = parser.parse_args()

DEVICE = torch.device('cuda:{}'.format(args.d))
torch.backends.cudnn.benchmark = True

net = create_network()
net.to(DEVICE)

ds_val = create_test_dataset(512)

AttackMethod = config.create_evaluation_attack_method(DEVICE)

if os.path.isfile(args.resume):
    load_checkpoint(args.resume, net)

print('Evaluating')
clean_acc, adv_acc = eval_one_epoch(net, ds_val, DEVICE, AttackMethod)
print('clean acc -- {}     adv acc -- {}'.format(clean_acc, adv_acc))
Example #7
ds_train = create_train_dataset(args.batch_size)
ds_val = create_test_dataset(args.batch_size)

TrainAttack = config.create_attack_method(DEVICE)
EvalAttack = config.create_evaluation_attack_method(DEVICE)

now_epoch = 0

if args.auto_continue:
    args.resume = os.path.join(config.model_dir, 'last.checkpoint')
if args.resume is not None and os.path.isfile(args.resume):
    now_epoch = load_checkpoint(args.resume, net, optimizer, lr_scheduler)

while True:
    if now_epoch > config.num_epochs:
        break
    now_epoch = now_epoch + 1

    descrip_str = 'Training epoch:{}/{} -- lr:{}'.format(
        now_epoch, config.num_epochs, lr_scheduler.get_lr()[0])
    train_one_epoch(net, ds_train, optimizer, criterion, DEVICE,
                    descrip_str, TrainAttack, adv_coef=args.adv_coef)
    if config.val_interval > 0 and now_epoch % config.val_interval == 0:
        eval_one_epoch(net, ds_val, DEVICE, EvalAttack)

    lr_scheduler.step()

    save_checkpoint(now_epoch, net, optimizer, lr_scheduler,
                    file_name=os.path.join(config.model_dir, 'epoch-{}.checkpoint'.format(now_epoch)))
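Examples 1, 4 and 7 call train_one_epoch with the keyword form used just above, while Example 5 passes a YOPO-style LayerOneTrainer instead. A hedged sketch matching the simpler form, where the AttackMethod.attack(net, images, labels) interface and the way adv_coef weights the adversarial loss are assumptions, could be:

def train_one_epoch(net, batch_generator, optimizer, criterion, DEVICE,
                    descrip_str='Training', AttackMethod=None, adv_coef=1.0):
    # one pass over the data; mixes the clean loss with an adversarial loss
    # when an attack is given (descrip_str would drive a progress bar in the
    # real code and is unused in this sketch)
    net.train()
    for images, labels in batch_generator:
        images, labels = images.to(DEVICE), labels.to(DEVICE)
        # craft adversarial examples before zeroing gradients so the attack's
        # backward passes do not leak into the parameter update below
        adv_images = None
        if AttackMethod is not None:
            adv_images = AttackMethod.attack(net, images, labels)
        optimizer.zero_grad()
        loss = criterion(net(images), labels)
        if adv_images is not None:
            loss = loss + adv_coef * criterion(net(adv_images), labels)
        loss.backward()
        optimizer.step()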
Example #8
        break
    now_epoch = now_epoch + 1

    descrip_str = 'Training epoch:{}/{} -- lr:{}'.format(
        now_epoch, config.num_epochs,
        lr_scheduler.get_lr()[0])
    train_one_epoch(
        net,
        ds_train,
        optimizer,
        criterion,
        DEVICE,
        descrip_str,
    )
    if config.val_interval > 0 and now_epoch % config.val_interval == 0:
        eval_one_epoch(
            net,
            ds_val,
            DEVICE,
        )

    lr_scheduler.step()

    save_checkpoint(now_epoch,
                    net,
                    optimizer,
                    lr_scheduler,
                    file_name=os.path.join(
                        config.model_dir,
                        'epoch-{}.checkpoint'.format(now_epoch)))
Example #9
            #  AttackMethod = None)
            trainattack = None
            train_one_epoch(net=net,
                            batch_generator=cifar10_training_loader,
                            optimizer=optimizer,
                            criterion=loss_function,
                            DEVICE=torch.device('cuda:{}'.format(0)),
                            descrip_str=str(epoch) + 'Training',
                            AttackMethod=trainattack)
        print('Learning_rate:', optimizer.param_groups[0]['lr'])
        if args.attack:
            AttackMethod = config.create_evaluation_attack_method(
                torch.device('cuda:{}'.format(0)))
        acc = eval_training(epoch)
        clean_acc, adv_acc = eval_one_epoch(net, cifar10_test_loader,
                                            torch.device('cuda:{}'.format(0)),
                                            AttackMethod)
        print('clean acc -- {}     adv acc -- {} in training mode'.format(
            clean_acc, adv_acc))
        # acc = eval_training(epoch)

        # clean_acc, adv_acc_t = eval_one_epoch(net, cifar10_test_loader, torch.device('cuda:{}'.format(0)), AttackMethod)
        # print('clean acc -- {}     adv acc -- {}'.format(clean_acc, adv_acc_t))

        # start saving the best-performing model after the learning rate has decayed to 0.01
        if epoch > settings.MILESTONES2[1] and best_acc < adv_acc:
            torch.save(
                net.state_dict(),
                checkpoint_path.format(net=args.net, epoch=epoch, type='best'))
            best_acc = adv_acc
            continue