import time

import tabulate

import training_utils

# Header row for the per-epoch results table printed below.
columns = ["ep", "lr", "tr_loss", "tr_acc", "te_loss", "te_acc", "time"]


def train_epoch(model, loaders, criterion, optimizer, epoch, end_epoch,
                eval_freq=1, save_freq=10, output_dir='./', lr_init=0.01):
    time_ep = time.time()

    # Anneal the learning rate for this epoch (plain SGD schedule, no SWA).
    lr = training_utils.schedule(epoch, lr_init, end_epoch, swa=False)
    training_utils.adjust_learning_rate(optimizer, lr)

    # One pass over the training set.
    train_res = training_utils.train_epoch(loaders["train"], model, criterion, optimizer)

    # Evaluate on the test set on the first epoch, every eval_freq epochs,
    # and on the final epoch; otherwise skip evaluation this epoch.
    if epoch == 0 or epoch % eval_freq == eval_freq - 1 or epoch == end_epoch - 1:
        test_res = training_utils.eval(loaders["test"], model, criterion)
    else:
        test_res = {"loss": None, "accuracy": None}

    # Periodically checkpoint the model and optimizer state.
    if (epoch + 1) % save_freq == 0:
        training_utils.save_checkpoint(
            output_dir,
            epoch + 1,
            state_dict=model.state_dict(),
            optimizer=optimizer.state_dict(),
        )

    time_ep = time.time() - time_ep
    values = [
        epoch + 1,
        lr,
        train_res["loss"],
        train_res["accuracy"],
        test_res["loss"],
        test_res["accuracy"],
        time_ep,
    ]

    # Print one table row per epoch; re-print the header every 40 epochs.
    table = tabulate.tabulate([values], columns, tablefmt="simple", floatfmt="8.4f")
    if epoch % 40 == 0:
        table = table.split("\n")
        table = "\n".join([table[1]] + table)
    else:
        table = table.split("\n")[2]
    print(table)
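# The `schedule` helper is called above but not defined in this excerpt.
# A minimal sketch, assuming the piecewise-linear decay commonly used in
# SWA/SWAG training scripts: hold lr_init for the first half of training,
# decay linearly until 90%, then hold the floor. The name `schedule_sketch`
# and the exact breakpoints are assumptions, not the verified helper.
def schedule_sketch(epoch, lr_init, epochs, swa=False, swa_start=None, swa_lr=None):
    t = epoch / (swa_start if swa else epochs)
    lr_ratio = (swa_lr / lr_init) if swa else 0.01
    if t <= 0.5:
        factor = 1.0
    elif t <= 0.9:
        factor = 1.0 - (1.0 - lr_ratio) * (t - 0.5) / 0.4
    else:
        factor = lr_ratio
    return lr_init * factor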
# A checkpoint save at start_epoch is left commented out here (the
# `utils.save_checkpoint` head is assumed from the keyword arguments below):
# utils.save_checkpoint(
#     args.dir,
#     start_epoch,
#     state_dict=model.state_dict(),
#     optimizer=optimizer.state_dict(),
# )

# Running mean of SGD test predictions, collected alongside the SWAG model.
sgd_ens_preds = None
sgd_targets = None
n_ensembled = 0.0
velocity = None  # momentum buffer threaded through train_epoch_v2 (assumed to start as None)

for epoch in range(start_epoch, args.epochs):
    time_ep = time.time()

    # Anneal the learning rate unless a constant schedule was requested.
    if not args.no_schedule:
        lr = schedule(epoch)
        utils.adjust_learning_rate(optimizer, lr)
    else:
        lr = args.lr_init

    # Both branches issue the same train_epoch_v2 call; the older train_epoch
    # call is kept commented out. (Assumed: the second, truncated call mirrors
    # the arguments of the first.)
    if (args.swa and (epoch + 1) > args.swa_start) and args.cov_mat:
        # train_res = utils.train_epoch(loaders["train"], model, criterion, optimizer, weight_decay=args.wd)
        train_res, velocity = utils.train_epoch_v2(loaders["train"], model, criterion, optimizer,
                                                   weight_decay=args.wd, velocity=velocity)
    else:
        # train_res = utils.train_epoch(loaders["train"], model, criterion, optimizer, weight_decay=args.wd)
        train_res, velocity = utils.train_epoch_v2(loaders["train"], model, criterion, optimizer,
                                                   weight_decay=args.wd, velocity=velocity)
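# sgd_ens_preds / n_ensembled above suggest a running mean of SGD test
# predictions. A minimal sketch of that update, assuming `preds` is a NumPy
# array of per-example class probabilities from the current epoch (the helper
# name `update_sgd_ensemble` is hypothetical):
def update_sgd_ensemble(sgd_ens_preds, n_ensembled, preds):
    # Incremental mean: equivalent to averaging all collected predictions
    # without storing each epoch's output separately.
    if sgd_ens_preds is None:
        sgd_ens_preds = preds.copy()
    else:
        sgd_ens_preds = sgd_ens_preds * n_ensembled / (n_ensembled + 1.0) \
            + preds / (n_ensembled + 1.0)
    return sgd_ens_preds, n_ensembled + 1.0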
# Periodically save the SWAG model; the helper name is assumed (the excerpt
# begins mid-call), the keyword arguments are original.
save_checkpoint(
    args.dir,
    epoch=epoch,
    name='swag',
    state_dict=swag_model.state_dict(),
)

### Adjust LR ###
if args.optimizer == 'RMSProp':
    # Step the scheduler normally until fine-tuning starts, then reset the
    # learning rate to args.lr_init.
    if epoch < args.ft_start:
        scheduler.step(epoch=epoch)
    else:
        scheduler.step(epoch=-1)  # reset to args.lr_init
elif args.optimizer == 'SGD':
    lr = schedule(epoch, args.lr_init, args.epochs, args.swa, args.swa_start, args.swa_lr)
    adjust_learning_rate(optimizer, lr)

### Test set ###
if args.swa:
    # Evaluate the SWA solution: sample with zero scale (i.e. take the mean
    # weights) and refresh BatchNorm statistics before testing.
    swag_model.sample(0.0)
    bn_update(train_loader, swag_model)
    test_loss, test_err, test_iou = train_utils.test(swag_model, loaders['test'], criterion)
    print('SWA Test - Loss: {:.4f} | Acc: {:.4f} | IOU: {:.4f}'.format(
        test_loss, 1 - test_err, test_iou))

test_loss, test_err, test_iou = train_utils.test(model, loaders['test'], criterion)
print('SGD Test - Loss: {:.4f} | Acc: {:.4f} | IOU: {:.4f}'.format(
    test_loss, 1 - test_err, test_iou))
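# `bn_update` above refreshes BatchNorm running statistics after the SWA/SWAG
# mean weights are swapped in (the stored statistics belong to the last SGD
# iterate, not the averaged weights). A minimal sketch of such a utility,
# assuming `loader` yields (input, target) batches; this is an illustration,
# not the verified helper:
import torch

def bn_update_sketch(loader, model):
    momenta = {}
    for module in model.modules():
        if isinstance(module, torch.nn.modules.batchnorm._BatchNorm):
            module.reset_running_stats()
            momenta[module] = module.momentum
    if not momenta:
        return  # no BatchNorm layers, nothing to do
    model.train()  # BN updates running stats only in train mode
    n = 0
    with torch.no_grad():
        for inputs, _ in loader:
            b = inputs.size(0)
            # Choose momentum so the running estimate is an exact
            # (batch-size weighted) cumulative average over the pass.
            for module in momenta:
                module.momentum = b / float(n + b)
            model(inputs)
            n += b
    # Restore the original momenta.
    for module, momentum in momenta.items():
        module.momentum = momentum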