def main():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    torch.backends.cudnn.benchmark = True

    net = create_network()
    net.to(device)

    criterion = config.create_loss_function().to(device)
    optimizer = config.create_optimizer(net.parameters())
    lr_scheduler = config.create_lr_scheduler(optimizer)

    ds_train = create_train_dataset(args.batch_size)
    ds_val = create_test_dataset(args.batch_size)

    train_attack = config.create_attack_method(device)
    eval_attack = config.create_evaluation_attack_method(device)

    now_epoch = 0
    if args.auto_continue:
        args.resume = os.path.join(config.model_dir, 'last.checkpoint')
    if args.resume is not None and os.path.isfile(args.resume):
        now_epoch = load_checkpoint(args.resume, net, optimizer, lr_scheduler)

    for i in range(now_epoch, config.num_epochs):
        descr_str = 'Epoch:{}/{} -- lr:{}'.format(
            i, config.num_epochs, lr_scheduler.get_last_lr()[0])
        cleanacc, advacc = train_one_epoch(net, ds_train, optimizer, criterion,
                                           device, descr_str, train_attack,
                                           adv_coef=args.adv_coef)
        tb_train_dic = {'Acc': cleanacc, 'AdvAcc': advacc}
        print('Train: {}'.format(tb_train_dic))

        if config.eval_interval > 0 and i % config.eval_interval == 0:
            acc, advacc = eval_one_epoch(net, ds_val, device, eval_attack)
            tb_val_dic = {'Acc': acc, 'AdvAcc': advacc}
            print('Eval: {}'.format(tb_val_dic))

        lr_scheduler.step()
        save_checkpoint(i, net, optimizer, lr_scheduler,
                        file_name=os.path.join(config.model_dir,
                                               'advtrain.{}'.format(i)))

    acc, advacc = eval_one_epoch(net, ds_val, device, eval_attack)
    tb_val_dic = {'Acc': acc, 'AdvAcc': advacc}
    print('Eval: {}'.format(tb_val_dic))
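The checkpoint helpers used above are defined elsewhere in the repo; a minimal sketch consistent with how they are called (save takes the scheduler and a file name, load returns the epoch to resume from) could look like the following. The dict keys and the return convention are assumptions, not this repo's actual format.

def save_checkpoint(epoch, net, optimizer, lr_scheduler, file_name):
    # bundle everything needed to resume training into one file
    torch.save({'epoch': epoch,
                'state_dict': net.state_dict(),
                'optimizer': optimizer.state_dict(),
                'lr_scheduler': lr_scheduler.state_dict()}, file_name)

def load_checkpoint(file_name, net, optimizer=None, lr_scheduler=None):
    ckpt = torch.load(file_name, map_location='cpu')
    net.load_state_dict(ckpt['state_dict'])
    if optimizer is not None:
        optimizer.load_state_dict(ckpt['optimizer'])
    if lr_scheduler is not None:
        lr_scheduler.load_state_dict(ckpt['lr_scheduler'])
    # resume from the epoch after the one that was saved
    return ckpt['epoch'] + 1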
def main():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    net = create_network()
    net.to(device)

    criterion = config.create_loss_function().to(device)
    optimizer = config.create_optimizer(net.other_layers.parameters())
    lr_scheduler = config.create_lr_scheduler(optimizer)

    # YOPO gives the first layer its own optimizer, driven by the
    # Hamiltonian of the first-layer activations.
    Hamiltonian_func = Hamiltonian(net.layer_one, config.weight_decay)
    layer_one_optimizer = optim.SGD(net.layer_one.parameters(),
                                    lr=lr_scheduler.get_last_lr()[0],
                                    momentum=0.9, weight_decay=5e-4)
    layer_one_optimizer_lr_scheduler = optim.lr_scheduler.MultiStepLR(
        layer_one_optimizer, milestones=[70, 90, 100], gamma=0.1)
    LayerOneTrainer = FastGradientLayerOneTrainer(Hamiltonian_func,
                                                  layer_one_optimizer,
                                                  config.inner_iters,
                                                  config.sigma, config.eps)

    ds_train = create_train_dataset(args.batch_size)
    ds_eval = create_test_dataset(args.batch_size)

    eval_attack = config.create_evaluation_attack_method(device)

    now_epoch = 0
    if args.auto_continue:
        args.resume = os.path.join(config.model_dir, 'last.checkpoint')
    if args.resume is not None and os.path.isfile(args.resume):
        now_epoch = load_checkpoint(args.resume, net, optimizer, lr_scheduler)

    for i in range(now_epoch, config.num_epochs):
        descrip_str = 'Training epoch:{}/{} -- lr:{}'.format(
            i, config.num_epochs, lr_scheduler.get_last_lr()[0])
        acc, yofoacc = train_one_epoch(net, ds_train, optimizer, criterion,
                                       LayerOneTrainer, config.K,
                                       device, descrip_str)
        tb_train_dic = {'Acc': acc, 'YofoAcc': yofoacc}
        print('Train: {}'.format(tb_train_dic))

        if config.eval_interval > 0 and i % config.eval_interval == 0:
            acc, advacc = eval_one_epoch(net, ds_eval, device, eval_attack)
            tb_val_dic = {'Acc': acc, 'AdvAcc': advacc}
            print('Eval: {}'.format(tb_val_dic))

        lr_scheduler.step()
        layer_one_optimizer_lr_scheduler.step()
        save_checkpoint(i, net, optimizer, lr_scheduler,
                        file_name=os.path.join(config.model_dir,
                                               'epoch-{}.checkpoint'.format(i)))

    acc, advacc = eval_one_epoch(net, ds_eval, device, eval_attack)
    tb_val_dic = {'Acc': acc, 'AdvAcc': advacc}
    print('Eval: {}'.format(tb_val_dic))
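The `Hamiltonian` module consumed by `FastGradientLayerOneTrainer` is defined elsewhere; a minimal sketch, assuming the YOPO formulation H(x, p) = <p, layer_one(x)> and omitting the optional weight penalty, is shown below. The exact class in this codebase may differ.

class Hamiltonian(nn.Module):
    def __init__(self, layer, reg_coef):
        super().__init__()
        self.layer = layer
        self.reg_coef = reg_coef  # weight-penalty coefficient, unused in this sketch

    def forward(self, x, p):
        # inner product between the adjoint p and the first-layer output
        return torch.sum(p * self.layer(x))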
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--resume',
                        default='log/models/epoch-104.checkpoint',
                        type=str, metavar='PATH')
    parser.add_argument('-d', type=int, default=0, help='Which gpu to use')
    args = parser.parse_args()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    torch.backends.cudnn.benchmark = True

    net = create_network()
    if torch.cuda.device_count() > 1:
        print("Let's use {} GPUs!".format(torch.cuda.device_count()))
        net = nn.DataParallel(net)
    net.to(device)

    ds_val = create_test_dataset()
    attack_method = config.create_evaluation_attack_method(device)

    if os.path.isfile(args.resume):
        load_checkpoint(args.resume, net)

    print('Evaluating')
    clean_acc, adv_acc = eval_one_epoch(net, ds_val, device, attack_method)
    print('clean acc: {}, adv acc: {}'.format(clean_acc, adv_acc))

    with open(os.path.join(config.model_dir, 'mi_eval_results.txt'), 'w') as f:
        f.write('MI Eval, clean acc: {}, adv acc: {}'.format(clean_acc, adv_acc))
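The attack object returned by `config.create_evaluation_attack_method` is built in config. A minimal L-inf PGD attack with a plausible interface (an `attack(net, x, y)` method returning adversarial examples) might look like this sketch; the step size, epsilon, and iteration count are illustrative assumptions, not this repo's settings.

import torch
import torch.nn.functional as F

class PGDAttack:
    def __init__(self, eps=8 / 255, sigma=2 / 255, iters=20):
        self.eps, self.sigma, self.iters = eps, sigma, iters

    def attack(self, net, x, y):
        x_adv = x.clone().detach()
        for _ in range(self.iters):
            x_adv.requires_grad_()
            loss = F.cross_entropy(net(x_adv), y)
            grad = torch.autograd.grad(loss, x_adv)[0]
            # ascend the loss, then project back into the eps-ball and [0, 1]
            x_adv = x_adv.detach() + self.sigma * grad.sign()
            x_adv = x.detach() + torch.clamp(x_adv - x, -self.eps, self.eps)
            x_adv = torch.clamp(x_adv, 0.0, 1.0)
        return x_adv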
def main():
    model_dir = args.model_dir
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)

    torch.manual_seed(args.seed)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    model = create_network().to(device)
    optimizer = optim.SGD(model.parameters(), lr=args.lr,
                          momentum=args.momentum,
                          weight_decay=args.weight_decay)
    eval_attack = config.create_evaluation_attack_method(device)

    train_loader = create_train_dataset(args.batch_size)
    test_loader = create_test_dataset(args.batch_size)

    now_train_time = 0
    for i in range(1, args.epochs + 1):
        # adjust learning rate for SGD
        adjust_learning_rate(optimizer, i)

        s_time = time()
        descr_str = 'Epoch: {}/{}'.format(i, args.epochs)
        # adversarial training
        train(args, model, device, train_loader, optimizer, i, descr_str)
        now_train_time += time() - s_time

        if args.log_interval > 0 and i % args.log_interval == 0:
            acc, advacc = eval_one_epoch(model, test_loader, device, eval_attack)
            tb_val_dic = {'Acc': acc, 'AdvAcc': advacc}
            print('Eval: {}'.format(tb_val_dic))
            torch.save(model.state_dict(),
                       os.path.join(model_dir,
                                    'model-wideres-epoch{}.pt'.format(i)))

    acc, advacc = eval_one_epoch(model, test_loader, device, eval_attack)
    tb_val_dic = {'Acc': acc, 'AdvAcc': advacc}
    print('Eval: {}'.format(tb_val_dic))
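`adjust_learning_rate` is not defined in this snippet. A common step-decay schedule with this signature (as in the TRADES reference code) is sketched below; the milestones and decay factors are assumptions.

def adjust_learning_rate(optimizer, epoch):
    # decay the base learning rate by 10x at fixed epochs
    lr = args.lr
    if epoch >= 75:
        lr = args.lr * 0.1
    if epoch >= 90:
        lr = args.lr * 0.01
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr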
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--resume', default='log/models/last.checkpoint',
                        type=str, metavar='PATH',
                        help='path to latest checkpoint '
                             '(default: log/models/last.checkpoint)')
    parser.add_argument('-d', type=int, default=0, help='Which gpu to use')
    args = parser.parse_args()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    torch.backends.cudnn.benchmark = True

    net = create_network()
    net.to(device)

    ds_eval = create_test_dataset(100)
    attack_method = config.create_evaluation_attack_method(device)

    if os.path.isfile(args.resume):
        load_checkpoint(args.resume, net)

    print('Evaluating')
    clean_acc, adv_acc = eval_one_epoch(net, ds_eval, device, attack_method)
    print('clean acc: {}; adv acc: {}'.format(clean_acc, adv_acc))
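For reference, a minimal sketch of an `eval_one_epoch` matching the interface used throughout these scripts (returns clean and adversarial accuracy); the attack object's `attack(net, x, y)` method is an assumed interface, as in the PGD sketch above.

def eval_one_epoch(net, loader, device, attack_method):
    net.eval()
    clean_correct = adv_correct = total = 0
    for x, y in loader:
        x, y = x.to(device), y.to(device)
        with torch.no_grad():
            clean_correct += (net(x).argmax(dim=1) == y).sum().item()
        # the attack needs gradients internally, so no no_grad here
        x_adv = attack_method.attack(net, x, y)
        with torch.no_grad():
            adv_correct += (net(x_adv).argmax(dim=1) == y).sum().item()
        total += y.size(0)
    return 100.0 * clean_correct / total, 100.0 * adv_correct / total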
            break

        now_epoch = now_epoch + 1
        descrip_str = 'Training epoch:{}/{} -- lr:{}'.format(
            now_epoch, config.num_epochs, lr_scheduler.get_last_lr()[0])

        s_time = time.time()
        acc, yofoacc = train_one_epoch(net, ds_train, optimizer, criterion,
                                       LayerOneTrainer, config.K,
                                       DEVICE, descrip_str)
        now_train_time = now_train_time + time.time() - s_time

        tb_train_dic = {'Acc': acc, 'YofoAcc': yofoacc}
        print(tb_train_dic)
        writer.add_scalars('Train', tb_train_dic, now_epoch)

        if config.val_interval > 0 and now_epoch % config.val_interval == 0:
            acc, advacc = eval_one_epoch(net, ds_val, DEVICE, EvalAttack)
            tb_val_dic = {'Acc': acc, 'AdvAcc': advacc}
            writer.add_scalars('Val', tb_val_dic, now_epoch)
            tb_val_dic['time'] = now_train_time
            log_str = json.dumps(tb_val_dic)
            with open('time.log', 'a') as f:
                f.write(log_str + '\n')

        lr_scheduler.step()
        layer_one_optimizer_lr_scheduler.step()

        delta = datetime.datetime.now() - start_time
        duration = delta.days * 24 * 60 + (delta.seconds / 60)  # in minutes
        log_file.write("{} {}\n".format(now_epoch, duration))
        print("Epoch: {}, Duration: {}".format(now_epoch, duration))

        save_checkpoint(now_epoch, net,
def main():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    torch.backends.cudnn.benchmark = True

    net = create_network()
    net.to(device)

    criterion = config.create_loss_function().to(device)
    optimizer = config.create_optimizer(net.other_layers.parameters())
    lr_scheduler = config.create_lr_scheduler(optimizer)

    # Build the layer-one trainer. This block arguably belongs in config.py.
    Hamiltonian_func = Hamiltonian(net.layer_one, config.weight_decay)
    layer_one_optimizer = optim.SGD(net.layer_one.parameters(),
                                    lr=lr_scheduler.get_last_lr()[0],
                                    momentum=0.9, weight_decay=5e-4)
    layer_one_optimizer_lr_scheduler = optim.lr_scheduler.MultiStepLR(
        layer_one_optimizer, milestones=[55, 75, 95], gamma=0.1)
    LayerOneTrainer = FastGradientLayerOneTrainer(Hamiltonian_func,
                                                  layer_one_optimizer,
                                                  config.inner_iters,
                                                  config.sigma, config.eps)

    ds_train = create_train_dataset(args.batch_size)
    ds_val = create_test_dataset(args.batch_size)

    eval_attack = config.create_evaluation_attack_method(device)

    now_epoch = 0
    if args.auto_continue:
        args.resume = os.path.join(config.model_dir, 'last.checkpoint')
    if args.resume is not None and os.path.isfile(args.resume):
        now_epoch = load_checkpoint(args.resume, net, optimizer, lr_scheduler)

    for i in range(now_epoch, config.num_epochs):
        descrip_str = 'Epoch:{}/{} -- lr:{}'.format(
            i, config.num_epochs, lr_scheduler.get_last_lr()[0])
        acc, yofoacc = train_one_epoch(net, ds_train, optimizer, criterion,
                                       LayerOneTrainer, config.K,
                                       device, descrip_str)
        tb_train_dic = {'Acc': acc, 'YofoAcc': yofoacc}
        print('Train: {}'.format(tb_train_dic))

        if config.eval_interval > 0 and i % config.eval_interval == 0:
            acc, advacc = eval_one_epoch(net, ds_val, device, eval_attack)
            tb_val_dic = {'Acc': acc, 'AdvAcc': advacc}
            print('Eval: {}'.format(tb_val_dic))

        lr_scheduler.step()
        layer_one_optimizer_lr_scheduler.step()
        save_checkpoint(i, net, optimizer, lr_scheduler,
                        file_name=os.path.join(config.model_dir,
                                               'epoch-{}.checkpoint'.format(i)))

    acc, advacc = eval_one_epoch(net, ds_val, device, eval_attack)
    tb_val_dic = {'Acc': acc, 'AdvAcc': advacc}
    print('Eval: {}'.format(tb_val_dic))
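For intuition, here is a sketch of one inner step of the kind `FastGradientLayerOneTrainer` performs: with the adjoint p (the loss gradient at the first-layer output) held fixed, the perturbation eta is updated through the Hamiltonian alone, so the full network is not re-propagated. The function name, clamping ranges, and sign convention for p are assumptions about this codebase, not its actual implementation.

def layer_one_inner_step(hamiltonian, inp, eta, p, sigma, eps):
    eta = eta.detach().requires_grad_()
    H = hamiltonian(inp + eta, p.detach())
    grad_eta = torch.autograd.grad(H, eta, only_inputs=True)[0]
    # move eta along the Hamiltonian gradient (whether this is ascent or
    # descent depends on how the sign of p is defined upstream)
    eta = eta.detach() + sigma * grad_eta.sign()
    eta = torch.clamp(eta, -eps, eps)
    eta = torch.clamp(inp + eta, 0.0, 1.0) - inp  # keep inp + eta a valid image
    return eta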