# PDTS (parallel distributed Thompson sampling) run with a SWAG posterior.
# Reconstructed from a line-collapsed paste: in the original single-line form
# everything after the first `#` was a comment, so only `args.thompson = False`
# would ever have executed. Statements below restore the intended script.
args.thompson = False  # use PDTS batch selection rather than plain Thompson sampling

### swag ###
args.swag = True            # enable SWAG posterior approximation
args.samples = 50           # posterior samples drawn per acquisition round
args.pdts = True
args.pdts_batches = 30      # number of PDTS acquisition batches
args.epochs_init_map = 0    # no separate MAP warm-up phase
args.epochs = 0             # no standard (non-SWAG) training epochs
args.lr_swag = 2e-5         # LR during SWAG snapshot collection
args.lr = 1e-4              # base LR (unused here since args.epochs == 0)
args.weight_decay_swag = 0.01
args.momentum_swag = 0
args.burnin_swag = 75       # epochs before SWAG starts collecting snapshots
args.epochs_swag = 150      # total SWAG training epochs
args.loss_threshold = -5    # presumably an early-stop/acceptance bound — TODO confirm
args.cov_mat = True         # keep low-rank covariance (SWAG, not SWAG-diag)
args.max_num_models = 20    # rank cap for the covariance deviation matrix
args.block = False

################################################
# run
results = pdts(args, model_idx=0)
# SWAG ensemble-training run with wandb logging.
# Reconstructed from a line-collapsed paste: the single-line original was a
# SyntaxError (multiple statements with no separators). Statements below
# restore the intended script.
args.wandb_proj = 'official2b'
args.wandb_name = 'swag'
args.checkpoint_path = '/home/willlamb/checkpoints/map'

# ensembling and samples
args.ensemble_size = 5
args.pytorch_seeds = [0, 1, 2, 3, 4]  # one seed per ensemble member
args.samples = 30                     # posterior samples per SWAG model

### swag ###
args.swag = True           # enable SWAG posterior approximation
args.epochs = 0            # no standard (non-SWAG) training epochs
args.batch_size_swag = 50
args.lr_swag = 2e-5        # LR during SWAG snapshot collection
args.weight_decay_swag = 0.01
args.momentum_swag = 0
args.burnin_swag = 20      # epochs before SWAG starts collecting snapshots
args.epochs_swag = 100     # total SWAG training epochs
args.val_threshold = 2.8   # presumably a validation-metric acceptance bound — TODO confirm
args.cov_mat = True        # keep low-rank covariance (SWAG, not SWAG-diag)
args.max_num_models = 20   # rank cap for the covariance deviation matrix
args.block = False

################################################
# run
results = run_training(args)