def train(cfg: Namespace) -> None:
    assert cfg.device == "cpu" or (cfg.device == "cuda" and T.cuda.is_available())

    root_dir = Path(__file__).resolve().parents[1]
    logger.info("training: experiment %s" % cfg.exp_name)

    # make dir-tree
    exp_dir = root_dir / "experiments" / cfg.exp_name
    for d in ["out", "checkpoint", "logs"]:
        os.makedirs(exp_dir / d, exist_ok=True)

    cfg.to_file(exp_dir / "train_config.json")

    # tensorboard writer
    tb_writer = SummaryWriter(exp_dir / "logs")
    logger.info("started tensorboard writer")

    model = CAE()
    model.train()
    if cfg.device == "cuda":
        model.cuda()
    logger.info(f"loaded model on {cfg.device}")

    dataloader = DataLoader(
        dataset=ImageFolder720p(cfg.dataset_path),
        batch_size=cfg.batch_size,
        shuffle=cfg.shuffle,
        num_workers=cfg.num_workers,
    )
    logger.info(f"loaded dataset from {cfg.dataset_path}")

    optimizer = optim.Adam(model.parameters(), lr=cfg.learning_rate, weight_decay=1e-5)
    loss_criterion = nn.MSELoss()

    avg_loss, epoch_avg = 0.0, 0.0
    ts = 0

    # EPOCHS
    for epoch_idx in range(cfg.start_epoch, cfg.num_epochs + 1):
        # BATCHES
        for batch_idx, data in enumerate(dataloader, start=1):
            img, patches, _ = data

            if cfg.device == "cuda":
                patches = patches.cuda()

            avg_loss_per_image = 0.0
            # each 768x1280 input is split into a 6x10 grid of 128x128 patches
            for i in range(6):
                for j in range(10):
                    optimizer.zero_grad()

                    x = patches[:, :, i, j, :, :]
                    y = model(x)
                    loss = loss_criterion(y, x)

                    avg_loss_per_image += (1 / 60) * loss.item()

                    loss.backward()
                    optimizer.step()

            avg_loss += avg_loss_per_image
            epoch_avg += avg_loss_per_image

            if batch_idx % cfg.batch_every == 0:
                tb_writer.add_scalar("train/avg_loss", avg_loss / cfg.batch_every, ts)

                for name, param in model.named_parameters():
                    tb_writer.add_histogram(name, param, ts)

                logger.debug(
                    "[%3d/%3d][%5d/%5d] avg_loss: %.8f"
                    % (
                        epoch_idx,
                        cfg.num_epochs,
                        batch_idx,
                        len(dataloader),
                        avg_loss / cfg.batch_every,
                    )
                )

                avg_loss = 0.0
                ts += 1
            # -- end batch every

            if batch_idx % cfg.save_every == 0:
                # reconstruct the first image of the batch, patch by patch
                out = T.zeros(6, 10, 3, 128, 128)
                for i in range(6):
                    for j in range(10):
                        x = patches[0, :, i, j, :, :].unsqueeze(0)  # already on cfg.device
                        out[i, j] = model(x).cpu().data

                # stitch the 6x10 patch grid back into a 768x1280 CHW image
                out = np.transpose(out, (0, 3, 1, 4, 2))
                out = np.reshape(out, (768, 1280, 3))
                out = np.transpose(out, (2, 0, 1))

                # original | reconstruction, side by side
                y = T.cat((img[0], out), dim=2).unsqueeze(0)
                save_imgs(
                    imgs=y,
                    to_size=(3, 768, 2 * 1280),
                    name=exp_dir / f"out/{epoch_idx}_{batch_idx}.png",
                )
            # -- end save every
        # -- end batches

        if epoch_idx % cfg.epoch_every == 0:
            epoch_avg /= len(dataloader) * cfg.epoch_every

            tb_writer.add_scalar(
                "train/epoch_avg_loss",
                epoch_avg,
                epoch_idx // cfg.epoch_every,
            )

            logger.info("Epoch avg = %.8f" % epoch_avg)
            epoch_avg = 0.0

            T.save(model.state_dict(), exp_dir / f"checkpoint/model_{epoch_idx}.pth")
        # -- end epoch every
    # -- end epoch

    # save final model
    T.save(model.state_dict(), exp_dir / "model_final.pth")

    # cleaning
    tb_writer.close()
def train(cfg: Namespace) -> None:
    assert cfg.device == 'cpu' or (cfg.device == 'cuda' and T.cuda.is_available())

    logger.info('training: experiment %s' % cfg.exp_name)

    # make dir-tree
    exp_dir = ROOT_DIR / 'experiments' / cfg.exp_name
    for d in ['out', 'checkpoint', 'logs']:
        os.makedirs(exp_dir / d, exist_ok=True)

    cfg.to_file(exp_dir / 'train_config.txt')

    # tensorboard writer
    writer = SummaryWriter(exp_dir / 'logs')

    model = CAE()
    model.train()
    if cfg.device == 'cuda':
        model.cuda()
    logger.info(f'loaded model on {cfg.device}')

    dataset = ImageFolder720p(cfg.dataset_path)
    dataloader = DataLoader(
        dataset,
        batch_size=cfg.batch_size,
        shuffle=cfg.shuffle,
        num_workers=cfg.num_workers,
    )
    logger.info('loaded dataset')

    optimizer = optim.Adam(model.parameters(), lr=cfg.learning_rate, weight_decay=1e-5)
    loss_criterion = nn.MSELoss()

    avg_loss, epoch_avg = 0.0, 0.0
    ts = 0

    # EPOCHS
    for epoch_idx in range(cfg.start_epoch, cfg.num_epochs + 1):
        # BATCHES
        for batch_idx, data in enumerate(dataloader, start=1):
            img, patches, _ = data

            if cfg.device == 'cuda':
                patches = patches.cuda()

            avg_loss_per_image = 0.0
            # each 768x1280 input is split into a 6x10 grid of 128x128 patches
            for i in range(6):
                for j in range(10):
                    optimizer.zero_grad()

                    x = patches[:, :, i, j, :, :]
                    y = model(x)
                    loss = loss_criterion(y, x)

                    avg_loss_per_image += (1 / 60) * loss.item()

                    loss.backward()
                    optimizer.step()

            avg_loss += avg_loss_per_image
            epoch_avg += avg_loss_per_image

            if batch_idx % cfg.batch_every == 0:
                writer.add_scalar('train/avg_loss', avg_loss / cfg.batch_every, ts)

                for name, param in model.named_parameters():
                    writer.add_histogram(name, param, ts)

                logger.debug(
                    '[%3d/%3d][%5d/%5d] avg_loss: %.8f'
                    % (
                        epoch_idx,
                        cfg.num_epochs,
                        batch_idx,
                        len(dataloader),
                        avg_loss / cfg.batch_every,
                    )
                )

                avg_loss = 0.0
                ts += 1
            # -- end batch every

            if batch_idx % cfg.save_every == 0:
                # reconstruct the first image of the batch, patch by patch
                out = T.zeros(6, 10, 3, 128, 128)
                for i in range(6):
                    for j in range(10):
                        x = patches[0, :, i, j, :, :].unsqueeze(0)  # already on cfg.device
                        out[i, j] = model(x).cpu().data

                # stitch the 6x10 patch grid back into a 768x1280 CHW image
                out = np.transpose(out, (0, 3, 1, 4, 2))
                out = np.reshape(out, (768, 1280, 3))
                out = np.transpose(out, (2, 0, 1))

                # original | reconstruction, side by side
                y = T.cat((img[0], out), dim=2).unsqueeze(0)
                save_imgs(
                    imgs=y,
                    to_size=(3, 768, 2 * 1280),
                    name=exp_dir / f'out/{epoch_idx}_{batch_idx}.png',
                )
            # -- end save every
        # -- end batches

        if epoch_idx % cfg.epoch_every == 0:
            epoch_avg /= len(dataloader) * cfg.epoch_every

            writer.add_scalar(
                'train/epoch_avg_loss',
                epoch_avg,
                epoch_idx // cfg.epoch_every,
            )

            logger.info('Epoch avg = %.8f' % epoch_avg)
            epoch_avg = 0.0

            T.save(model.state_dict(), exp_dir / f'checkpoint/model_{epoch_idx}.state')
        # -- end epoch every
    # -- end epoch

    # save final model
    T.save(model.state_dict(), exp_dir / 'model_final.state')

    # cleaning
    writer.close()
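# ---------------------------------------------------------------------------
# Hypothetical usage sketch for the two Namespace-based train() variants above
# (not part of the original code). Both read cfg.device, cfg.exp_name,
# cfg.dataset_path, cfg.batch_size, cfg.shuffle, cfg.num_workers,
# cfg.learning_rate, cfg.start_epoch, cfg.num_epochs, cfg.batch_every,
# cfg.save_every and cfg.epoch_every, and also call cfg.to_file(path), so a
# bare argparse.Namespace is not enough. The class name, serialization format
# and field values below are assumptions, shown only to illustrate the
# expected interface.
class ExampleTrainConfig:
    """Hypothetical stand-in for the config object train() expects."""

    def __init__(self, **kwargs):
        # expose every config entry as an attribute (cfg.device, cfg.batch_size, ...)
        self.__dict__.update(kwargs)

    def to_file(self, path) -> None:
        import json  # local import keeps this sketch self-contained

        # dump all config entries next to the experiment outputs
        with open(path, "w") as f:
            json.dump(self.__dict__, f, indent=2, default=str)

# example_cfg = ExampleTrainConfig(
#     device="cuda", exp_name="cae_baseline", dataset_path="data/train",
#     batch_size=4, shuffle=True, num_workers=2, learning_rate=1e-4,
#     start_epoch=1, num_epochs=10, batch_every=10, save_every=100,
#     epoch_every=1,
# )
# train(example_cfg)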
def train(cfg):
    os.makedirs(f"out/{cfg['exp_name']}", exist_ok=True)
    os.makedirs(f"checkpoints/{cfg['exp_name']}", exist_ok=True)

    # dump config for current experiment
    with open(f"checkpoints/{cfg['exp_name']}/setup.cfg", "wt") as f:
        for k, v in cfg.items():
            f.write("%15s: %s\n" % (k, v))

    model = CAE().cuda()
    if cfg['load']:
        model.load_state_dict(torch.load(cfg['chkpt']))
        logger.info("Loaded model from %s", cfg['chkpt'])
    model.train()
    logger.info("Done setup model")

    dataset = ImageFolder720p(cfg['dataset_path'])
    dataloader = DataLoader(
        dataset,
        batch_size=cfg['batch_size'],
        shuffle=cfg['shuffle'],
        num_workers=cfg['num_workers'],
    )
    logger.info(
        f"Done setup dataloader: {len(dataloader)} batches of size {cfg['batch_size']}"
    )

    mse_loss = nn.MSELoss()

    # pick the optimizer here (Adam by default; SGD kept around for quick experiments)
    adam = torch.optim.Adam(model.parameters(), lr=cfg['learning_rate'], weight_decay=1e-5)
    sgd = torch.optim.SGD(model.parameters(), lr=cfg['learning_rate'])
    optimizer = adam

    ra = 0  # running average of the per-image loss across the epoch

    for ei in range(cfg['resume_epoch'], cfg['num_epochs']):
        for bi, (img, patches, _) in enumerate(dataloader):

            avg_loss = 0
            # each 768x1280 input is split into a 6x10 grid of 128x128 patches
            for i in range(6):
                for j in range(10):
                    x = patches[:, :, i, j, :, :].cuda()
                    y = model(x)
                    loss = mse_loss(y, x)

                    avg_loss += (1 / 60) * loss.item()

                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()

            ra = avg_loss if bi == 0 else ra * bi / (bi + 1) + avg_loss / (bi + 1)

            logger.debug(
                '[%3d/%3d][%5d/%5d] avg_loss: %f, ra: %f'
                % (ei + 1, cfg['num_epochs'], bi + 1, len(dataloader), avg_loss, ra)
            )

            # save img
            if bi % cfg['out_every'] == 0:
                # reconstruct the first image of the batch, patch by patch
                out = torch.zeros(6, 10, 3, 128, 128)
                for i in range(6):
                    for j in range(10):
                        x = patches[0, :, i, j, :, :].unsqueeze(0).cuda()
                        out[i, j] = model(x).cpu().data

                # stitch the 6x10 patch grid back into a 768x1280 CHW image
                out = np.transpose(out, (0, 3, 1, 4, 2))
                out = np.reshape(out, (768, 1280, 3))
                out = np.transpose(out, (2, 0, 1))

                # original | reconstruction, side by side
                y = torch.cat((img[0], out), dim=2).unsqueeze(0)
                save_imgs(
                    imgs=y,
                    to_size=(3, 768, 2 * 1280),
                    name=f"out/{cfg['exp_name']}/out_{ei}_{bi}.png",
                )

            # save model
            if bi % cfg['save_every'] == cfg['save_every'] - 1:
                torch.save(
                    model.state_dict(),
                    f"checkpoints/{cfg['exp_name']}/model_{ei}_{bi}.state",
                )

    # save final model
    torch.save(model.state_dict(), f"checkpoints/{cfg['exp_name']}/model_final.state")
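# ---------------------------------------------------------------------------
# Hypothetical usage sketch for the legacy dict-based train() above (not part
# of the original code). This variant expects a plain dict with the keys it
# indexes; the values below are placeholders, not recommended settings.
EXAMPLE_LEGACY_CFG = {
    "exp_name": "cae_baseline",    # placeholder experiment name
    "load": False,                 # set True and point "chkpt" at a .state file to resume
    "chkpt": "",
    "dataset_path": "data/train",  # placeholder path to the 720p training images
    "batch_size": 4,
    "shuffle": True,
    "num_workers": 2,
    "learning_rate": 1e-4,
    "resume_epoch": 0,
    "num_epochs": 10,
    "out_every": 100,              # save a reconstruction image every N batches
    "save_every": 1000,            # save a checkpoint every N batches
}
# train(EXAMPLE_LEGACY_CFG)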