# NOTE(review): this chunk starts mid-call -- the line below is the tail of a
# call (optimizer/trainer construction, judging by the kwargs) whose opening
# lines sit above this chunk. Indentation reconstructed; confirm against the
# full file.
                          update_freq=args.update_freq)
    # Standard cross-entropy loss for classification.
    criterion = torch.nn.CrossEntropyLoss()
    best_acc = 0.0
    for epoch in range(args.epochs):
        track.debug("Starting epoch %d" % epoch)
        # Step the learning rate according to the configured schedule and
        # decay factor; the adjusted value is stored back on args.
        args.lr = adjust_learning_rate(epoch, optimizer, args.lr,
                                       args.schedule, args.gamma)
        train_loss, train_acc = train(trainloader, model, criterion,
                                      optimizer, epoch, args.cuda)
        test_loss, test_acc = test(testloader, model, criterion, epoch,
                                   args.cuda)
        track.debug('Finished epoch %d... | train loss %.3f | train acc %.3f '
                    '| test loss %.3f | test acc %.3f' %
                    (epoch, train_loss, train_acc, test_loss, test_acc))
        # Save model -- one full-model checkpoint per epoch in the track
        # trial directory (torch.save pickles the entire module object).
        model_fname = os.path.join(track.trial_dir(),
                                   "model{}.ckpt".format(epoch))
        torch.save(model, model_fname)
        # Additionally keep a copy of the best model seen so far, ranked by
        # test accuracy.
        if test_acc > best_acc:
            best_acc = test_acc
            best_fname = os.path.join(track.trial_dir(), "best.ckpt")
            track.debug("New best score! Saving model")
            torch.save(model, best_fname)


if __name__ == '__main__':
    # skeletor supplies the argparse flags, then runs the training entry point.
    skeletor.supply_args(add_train_args)
    skeletor.execute(do_training)
""" loads the model and trial data and runs the specified experiment(s)! """ # load the project from track proj = track.Project(args.results_dir) # create the ensemble model, trial_df = load_trial(proj, args.start_epoch, args.end_epoch, args.noise_scale) # register svhn so we can load it in OOD sk.datasets.add_dataset(svhn) # run the experiment def _run(experiment): track.debug('Starting to run experiment: %s' % experiment) experiment_module = 'sgld.experiments.' + experiment runner = getattr(importlib.import_module(experiment_module), 'run') runner(model, trial_df, **vars(args)) if args.mode == 'all': for experiment in EXPERIMENTS: _run(experiment) else: _run(args.mode) if __name__ == '__main__': sk.supply_args(make_args) sk.execute(main)
# NOTE(review): this chunk starts inside a DataLoader-style constructor call
# (the `testloader` used below) whose opening lines sit above this chunk.
# Indentation reconstructed; confirm against the full file.
        num_workers=2,
    )
    if args.fname:
        # Load a previously saved full-model checkpoint on CPU, then move it
        # to GPU. NOTE(review): torch.load unpickles arbitrary objects --
        # only load trusted checkpoint files.
        print("Loading model from %s" % args.fname)
        model = torch.load(args.fname, map_location="cpu").cuda()
    else:
        model = build_model("ResNet18", num_classes=10)
    criterion = torch.nn.CrossEntropyLoss()
    # Estimate the top Hessian eigenvalues/eigenvectors of the loss over the
    # test loader; some tuning knobs are left commented out below.
    eigenvals, eigenvecs = compute_hessian_eigenthings(
        model,
        testloader,
        criterion,
        args.num_eigenthings,
        mode=args.mode,
        # power_iter_steps=args.num_steps,
        max_samples=args.max_samples,
        # momentum=args.momentum,
        full_dataset=args.full_dataset,
        use_gpu=args.cuda,
    )
    print("Eigenvecs:")
    print(eigenvecs)
    print("Eigenvals:")
    print(eigenvals)
    # track.metric(iteration=0, eigenvals=eigenvals)


if __name__ == "__main__":
    skeletor.supply_args(extra_args)
    skeletor.execute(main)
track.debug("FINAL STATS:%s" % _debug_stats_str(final_stats)) def _add_args(parser): parser.add_argument('--alg', default='deepq', help='agent to train', choices=list(ALG_LEARN_FNS.keys())) parser.add_argument('--env', default='CartPole-v0', help='Gym environment name to train on') parser.add_argument('--attack', default='fgsm', choices=list(ATTACKS.keys()), help='attack method to run') parser.add_argument('--network', default='mlp', type=str, help='policy network arhitecture') parser.add_argument('--attack_ord', default=2, type=int, help="norm we use to constrain perturbation size") parser.add_argument('--num_rollouts', default=5, type=int, help='how many episodes to run for each attack') parser.add_argument('--eps', default=.1, type=float, help='perturbation magnitude') parser.add_argument('--model_dir', default='./models', type=str, help='where to look for model pkls by default') parser.add_argument('--load_path', default='', help='location of model .pkl with correct policy') parser.add_argument('--render', action='store_true', help='if true, render the actual gym env on-screen') if __name__ == '__main__': skeletor.supply_args(_add_args) skeletor.execute(main)