def run(args):
    """Train and evaluate a smooth-topk model end to end.

    Sets a descriptive process title, seeds RNGs, builds the experiment,
    data loaders, loss, model and optimizer from ``args``, then runs the
    epoch loop while logging metrics through the ``xp`` experiment object.
    """
    # Descriptive process title so the job is identifiable in ps/top.
    tag = 'bamos.smooth-topk.seed={}.{}'.format(args.seed, args.dataset)
    if args.dataset == 'cifar100':
        tag += '.noise={}'.format(args.noise_labels)
    elif args.dataset == 'imagenet':
        tag += '-{}'.format(args.train_size)
    setproctitle(tag)

    set_seed(args.seed)
    xp = create_experiment(args)
    train_loader, val_loader, test_loader = get_loaders(args)
    loss = get_loss(xp, args)
    model = get_model(args)

    # Optionally warm-start model weights from a checkpoint.
    if args.load_model:
        load_model(model, args.load_model)

    if args.cuda:
        if args.parallel_gpu:
            model = torch.nn.DataParallel(model).cuda()
        else:
            torch.cuda.set_device(args.device)
            model.cuda()
        # The loss carries learnable/GPU state too, so move it as well.
        loss.cuda()

    optimizer = get_optimizer(model, args.mu, args.lr_0, xp)
    if args.load_optimizer:
        load_optimizer(optimizer, args.load_optimizer, args.lr_0)

    # Mirror stdout into a per-run log file while training.
    with logger.stdout_to("{}_log.txt".format(args.out_name)):
        start = time.time()
        for _ in range(args.epochs):
            xp.Epoch.update(1).log()
            # The schedule may rebuild the optimizer (e.g. on lr changes).
            optimizer = update_optimizer(args.lr_schedule, optimizer, model, loss, xp)
            xp.Learning_Rate.update().log()
            xp.Mu.update().log()
            xp.Temperature.update().log()

            train(model, loss, optimizer, train_loader, xp, args)
            test(model, loss, val_loader, xp, args)
            test(model, loss, test_loader, xp, args)
        elapsed = time.time() - start
        print("\nEvaluation time: \t {0:.2g} min".format(elapsed * 1. / 60))
def main(args):
    """Run the train/val/test loop for the configured number of epochs.

    Builds datasets, optimizer, objective and the ``xp`` experiment logger
    from ``args``, then trains and evaluates once per epoch before printing
    the total elapsed time recorded in ``xp``.
    """
    set_seed(args)
    dataset_train, dataset_val, dataset_test = get_datasets(args)
    optimizer = get_optimizer(args)
    # The objective is parameterized by the optimizer's hyper-parameters.
    obj = get_objective(args, optimizer.hparams)
    xp = get_xp(args, optimizer)

    for epoch in range(args.epochs):
        xp.Epoch.update(1).log()
        train(obj, optimizer, dataset_train, xp, args, epoch)
        test(obj, optimizer, dataset_val, xp, args, epoch)
        test(obj, optimizer, dataset_test, xp, args, epoch)

    print_total_time(xp)
def run(args):
    """Train and evaluate a model for ``args.epochs`` epochs.

    Seeds RNGs, builds the experiment, data loaders, loss, model and
    optimizer from ``args``, optionally restores checkpoints, and logs
    per-epoch metrics through the ``xp`` experiment object.
    """
    set_seed(args.seed)
    xp = create_experiment(args)
    train_loader, val_loader, test_loader = get_loaders(args)
    loss = get_loss(xp, args)
    model = get_model(args)

    # Optionally warm-start model weights from a checkpoint.
    if args.load_model:
        load_model(model, args.load_model)

    if args.cuda:
        if args.parallel_gpu:
            model = torch.nn.DataParallel(model).cuda()
        else:
            torch.cuda.set_device(args.device)
            model.cuda()
        # The loss carries learnable/GPU state too, so move it as well.
        loss.cuda()

    optimizer = get_optimizer(model, args.mu, args.lr_0, xp)
    if args.load_optimizer:
        load_optimizer(optimizer, args.load_optimizer, args.lr_0)

    # Mirror stdout into a per-run log file while training.
    with logger.stdout_to("{}_log.txt".format(args.out_name)):
        start = time.time()
        for _ in range(args.epochs):
            xp.Epoch.update(1).log()
            # The schedule may rebuild the optimizer (e.g. on lr changes).
            optimizer = update_optimizer(args.lr_schedule, optimizer, model, loss, xp)
            xp.Learning_Rate.update().log()
            xp.Mu.update().log()
            xp.Temperature.update().log()

            train(model, loss, optimizer, train_loader, xp, args)
            test(model, loss, val_loader, xp, args)
            test(model, loss, test_loader, xp, args)
        elapsed = time.time() - start
        print("\nEvaluation time: \t {0:.2g} min".format(elapsed * 1. / 60))
def main(args):
    """Train with a step-decay schedule, then evaluate the best checkpoint.

    After each epoch the model is validated; when the epoch index (1-based)
    appears in ``args.T`` the learning rate is decayed by
    ``args.decay_factor``. After training, the best model recorded in ``xp``
    is reloaded and evaluated on the test split.
    """
    set_cuda(args)
    set_seed(args)
    loader_train, loader_val, loader_test = get_data_loaders(args)
    loss = get_loss(args)
    model = get_model(args)
    optimizer = get_optimizer(args, parameters=model.parameters())
    xp = get_xp(args, model, optimizer)

    for epoch in range(args.epochs):
        xp.Epoch.update(1).log()
        train(model, loss, optimizer, loader_train, xp, args)
        test(model, loader_val, xp, args)
        # args.T holds the (1-based) epochs at which to decay the lr.
        if (epoch + 1) in args.T:
            decay_optimizer(optimizer, args.decay_factor)

    # NOTE(review): reload + final test are placed after the loop, matching
    # the sibling driver below — confirm against the original layout.
    load_best_model(model, xp)
    test(model, loader_test, xp, args)
def main(args):
    """Train with step decay, then re-evaluate the best saved model.

    Runs the epoch loop with validation after every epoch, decaying the
    learning rate by ``args.decay_factor`` at the (1-based) epochs listed in
    ``args.T``. Afterwards the best checkpoint is reloaded from
    ``{args.xp_name}/best_model.pkl`` and evaluated on both the validation
    and test splits.
    """
    set_cuda(args)
    set_seed(args)
    loader_train, loader_val, loader_test = get_data_loaders(args)
    loss = get_loss(args)
    model = get_model(args)
    optimizer = get_optimizer(args, model, loss, parameters=model.parameters())
    xp = setup_xp(args, model, optimizer)

    for epoch in range(args.epochs):
        xp.epoch.update(epoch)
        train(model, loss, optimizer, loader_train, args, xp)
        test(model, optimizer, loader_val, args, xp)
        # args.T holds the (1-based) epochs at which to decay the lr.
        if (epoch + 1) in args.T:
            decay_optimizer(optimizer, args.decay_factor)

    load_best_model(model, '{}/best_model.pkl'.format(args.xp_name))
    test(model, optimizer, loader_val, args, xp)
    test(model, optimizer, loader_test, args, xp)