def process_single_epoch():
    print('**************')
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', type=int, default=0, help='Which gpu to use')
    args = parser.parse_args()

    DEVICE = torch.device('cuda:{}'.format(args.d))
    torch.backends.cudnn.benchmark = True

    net = create_network()
    net.to(DEVICE)

    nat_val = load_test_dataset(10000, natural=True)
    adv_val = load_test_dataset(10000, natural=False)
    AttackMethod = config.create_evaluation_attack_method(DEVICE)

    filename = '../ckpts/6leaf-epoch29.checkpoint'
    print(filename)
    if os.path.isfile(filename):
        load_checkpoint(filename, net)

    print('Evaluating Natural Samples')
    clean_acc, adv_acc = my_eval_one_epoch(net, nat_val, DEVICE, AttackMethod)
    print('clean acc -- {} adv acc -- {}'.format(clean_acc, adv_acc))

    print('Evaluating Adversarial Samples')
    clean_acc, adv_acc = my_eval_one_epoch(net, adv_val, DEVICE, AttackMethod)
    print('clean acc -- {} adv acc -- {}'.format(clean_acc, adv_acc))
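# `my_eval_one_epoch` is not shown in this section. A minimal sketch of what
# such an evaluation loop could look like -- assuming the attack object exposes
# an attack(net, x, y) method returning perturbed inputs and the loader yields
# (image, label) batches; both are assumptions, not the repo's confirmed API:
def my_eval_one_epoch_sketch(net, loader, device, attack_method):
    net.eval()
    clean_correct, adv_correct, total = 0, 0, 0
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        with torch.no_grad():
            # clean accuracy on the unperturbed batch
            clean_correct += (net(x).argmax(dim=1) == y).sum().item()
        x_adv = attack_method.attack(net, x, y)  # hypothetical interface
        with torch.no_grad():
            # robust accuracy on the attacked batch
            adv_correct += (net(x_adv).argmax(dim=1) == y).sum().item()
        total += y.size(0)
    return clean_correct / total, adv_correct / total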
def main():
    model = create_network().to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    EvalAttack = config.create_evaluation_attack_method(device)

    now_train_time = 0
    for epoch in range(1, args.epochs + 1):
        # adjust learning rate for SGD
        adjust_learning_rate(optimizer, epoch)

        s_time = time()
        descrip_str = 'Training epoch: {}/{}'.format(epoch, args.epochs)
        # adversarial training
        train(args, model, device, train_loader, optimizer, epoch, descrip_str)
        now_train_time += time() - s_time

        acc, advacc = eval_one_epoch(model, test_loader, device, EvalAttack)

        # save checkpoint
        if epoch % args.save_freq == 0:
            torch.save(
                model.state_dict(),
                os.path.join(config.model_dir,
                             'model-wideres-epoch{}.pt'.format(epoch)))
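# `adjust_learning_rate` is defined elsewhere in the script. It presumably
# applies a step decay to args.lr; the milestones below are illustrative
# placeholders, not the repo's actual schedule:
def adjust_learning_rate_sketch(optimizer, epoch):
    lr = args.lr
    if epoch >= 75:   # assumed milestone
        lr = args.lr * 0.1
    if epoch >= 90:   # assumed milestone
        lr = args.lr * 0.01
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr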
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--resume',
        default='log/models/last.checkpoint',
        type=str, metavar='PATH',
        help='path to latest checkpoint (default: log/models/last.checkpoint)')
    parser.add_argument('-d', type=int, default=0, help='Which gpu to use')
    args = parser.parse_args()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    torch.backends.cudnn.benchmark = True

    net = create_network()
    net.to(device)

    ds_val = create_test_dataset(512)
    attack_method = config.create_evaluation_attack_method(device)

    if os.path.isfile(args.resume):
        load_checkpoint(args.resume, net)

    print('Evaluating')
    clean_acc, adv_acc = eval_one_epoch(net, ds_val, device, attack_method)
    print('clean acc -- {} adv acc -- {}'.format(clean_acc, adv_acc))
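# `load_checkpoint` comes from the repo's utilities. Its call sites here suggest
# it restores the network (and, in the training scripts, optimizer and scheduler
# state) and returns the last completed epoch. A sketch under those assumptions,
# with hypothetical checkpoint key names:
def load_checkpoint_sketch(path, net, optimizer=None, lr_scheduler=None):
    ckpt = torch.load(path, map_location='cpu')
    net.load_state_dict(ckpt['state_dict'])            # assumed key
    if optimizer is not None and 'optimizer' in ckpt:
        optimizer.load_state_dict(ckpt['optimizer'])
    if lr_scheduler is not None and 'lr_scheduler' in ckpt:
        lr_scheduler.load_state_dict(ckpt['lr_scheduler'])
    return ckpt.get('epoch', 0)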
def main():
    DEVICE = torch.device('cuda:{}'.format(args.d))
    torch.backends.cudnn.benchmark = True

    net = create_network()
    net.to(DEVICE)
    criterion = config.create_loss_function().to(DEVICE)
    optimizer = config.create_optimizer(net.parameters())
    lr_scheduler = config.create_lr_scheduler(optimizer)

    ds_train = create_train_dataset(args.batch_size)
    ds_val = create_test_dataset(args.batch_size)

    TrainAttack = config.create_attack_method(DEVICE)
    EvalAttack = config.create_evaluation_attack_method(DEVICE)

    now_epoch = 0
    if args.auto_continue:
        args.resume = os.path.join(config.model_dir, 'last.checkpoint')
    if args.resume is not None and os.path.isfile(args.resume):
        now_epoch = load_checkpoint(args.resume, net, optimizer, lr_scheduler)

    while True:
        if now_epoch > config.num_epochs:
            break
        now_epoch = now_epoch + 1

        descrip_str = 'Training epoch:{}/{} -- lr:{}'.format(
            now_epoch, config.num_epochs, lr_scheduler.get_lr()[0])
        train_one_epoch(net, ds_train, optimizer, criterion, DEVICE,
                        descrip_str, TrainAttack, adv_coef=args.adv_coef)
        if config.eval_interval > 0 and now_epoch % config.eval_interval == 0:
            eval_one_epoch(net, ds_val, DEVICE, EvalAttack)

        lr_scheduler.step()
        save_checkpoint(now_epoch, net, optimizer, lr_scheduler,
                        file_name=os.path.join(
                            config.model_dir,
                            'epoch-{}.checkpoint'.format(now_epoch)))
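# The matching `save_checkpoint` presumably serializes the same fields the
# loader sketch above reads back. A minimal sketch (key names are assumptions):
def save_checkpoint_sketch(epoch, net, optimizer, lr_scheduler, file_name):
    torch.save({
        'epoch': epoch,
        'state_dict': net.state_dict(),
        'optimizer': optimizer.state_dict(),
        'lr_scheduler': lr_scheduler.state_dict(),
    }, file_name)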
def main():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    torch.backends.cudnn.benchmark = True

    net = create_network()
    net.to(device)
    criterion = config.create_loss_function().to(device)
    optimizer = config.create_optimizer(net.parameters())
    lr_scheduler = config.create_lr_scheduler(optimizer)

    ds_train = create_train_dataset(args.batch_size)
    ds_val = create_test_dataset(args.batch_size)

    train_attack = config.create_attack_method(device)
    eval_attack = config.create_evaluation_attack_method(device)

    now_epoch = 0
    if args.auto_continue:
        args.resume = os.path.join(config.model_dir, 'last.checkpoint')
    if args.resume is not None and os.path.isfile(args.resume):
        now_epoch = load_checkpoint(args.resume, net, optimizer, lr_scheduler)

    for i in range(now_epoch, config.num_epochs):
        descrip_str = 'Training epoch:{}/{} -- lr:{}'.format(
            i, config.num_epochs, lr_scheduler.get_last_lr()[0])
        train_one_epoch(net, ds_train, optimizer, criterion, device,
                        descrip_str, train_attack, adv_coef=args.adv_coef)
        if config.eval_interval > 0 and i % config.eval_interval == 0:
            eval_one_epoch(net, ds_val, device, eval_attack)

        lr_scheduler.step()
        save_checkpoint(i, net, optimizer, lr_scheduler,
                        file_name=os.path.join(
                            config.model_dir,
                            'epoch-{}.checkpoint'.format(i)))
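# `train_one_epoch` is not shown here. Given the adv_coef argument, it plausibly
# combines a clean loss with an adversarial loss weighted by that coefficient.
# A hedged sketch, reusing the hypothetical attack(net, x, y) interface:
def train_one_epoch_sketch(net, loader, optimizer, criterion, device,
                           descrip_str, attack_method, adv_coef=1.0):
    net.train()
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        x_adv = attack_method.attack(net, x, y)   # hypothetical interface
        optimizer.zero_grad()
        # clean loss plus weighted adversarial loss
        loss = criterion(net(x), y) + adv_coef * criterion(net(x_adv), y)
        loss.backward()
        optimizer.step()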
    Hamiltonian_func = Hamiltonian(net.layer_one, config.weight_decay)
    layer_one_optimizer = optim.SGD(net.layer_one.parameters(),
                                    lr=lr_scheduler.get_lr()[0],
                                    momentum=0.9, weight_decay=5e-4)
    layer_one_optimizer_lr_scheduler = optim.lr_scheduler.MultiStepLR(
        layer_one_optimizer, milestones=[15, 19], gamma=0.1)
    LayerOneTrainer = FastGradientLayerOneTrainer(Hamiltonian_func,
                                                  layer_one_optimizer,
                                                  config.inner_iters,
                                                  config.sigma, config.eps)

    ds_train = create_train_dataset(args.batch_size)
    ds_val = create_test_dataset(args.batch_size)

    EvalAttack = config.create_evaluation_attack_method(DEVICE)

    now_epoch = 0
    if args.auto_continue:
        args.resume = os.path.join(config.model_dir, 'last.checkpoint')
    if args.resume is not None and os.path.isfile(args.resume):
        now_epoch = load_checkpoint(args.resume, net, optimizer, lr_scheduler)

    now_train_time = 0
    while True:
        if now_epoch > config.num_epochs:
            break
        now_epoch = now_epoch + 1

        descrip_str = 'Training epoch:{}/{} -- lr:{}'.format(
            now_epoch, config.num_epochs, lr_scheduler.get_lr()[0])
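# The Hamiltonian/layer-one machinery above is characteristic of YOPO-style
# adversarial training, where H(x, p) couples the first layer's output f(x)
# with an adjoint variable p so the inner attack loop only back-propagates
# through layer one. A hedged sketch of such a module (the repo's actual class
# may use the weight-decay argument for an extra regularization term):
class HamiltonianSketch(torch.nn.Module):
    def __init__(self, layer, reg_coef=1e-4):
        super().__init__()
        self.layer = layer
        self.reg_coef = reg_coef
    def forward(self, x, p):
        # H(x, p) = <p, f(x)>, summed over the batch
        return torch.sum(self.layer(x) * p)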