def train_unet(args):
    """
    Wrapper for reconstruction (U-Net) model training.

    :param args: Arguments object, containing training hyperparameters.
    """
    args.exp_dir.mkdir(parents=True, exist_ok=True)
    writer = SummaryWriter(log_dir=args.exp_dir / 'summary')

    # Best dev loss is tracked across epochs; it is not restored from the checkpoint here.
    best_dev_loss = 1e9
    if args.resume:
        model, args, start_epoch, optimizer = load_recon_model(args, optim=True)
    else:
        model = build_reconstruction_model(args)
        if args.data_parallel:
            model = torch.nn.DataParallel(model)
        optimizer = build_optim(args, model.parameters())
        start_epoch = 0

    logging.info(args)
    logging.info(model)

    # Save arguments for bookkeeping
    args_dict = {key: str(value) for key, value in args.__dict__.items()
                 if not key.startswith('__') and not callable(key)}
    save_json(args.exp_dir / 'args.json', args_dict)

    train_loader = create_data_loader(args, 'train', shuffle=True)
    dev_loader = create_data_loader(args, 'val')
    display_loader = create_data_loader(args, 'val', display=True)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_step_size, args.lr_gamma)

    for epoch in range(start_epoch, args.num_epochs):
        train_loss, train_time = train_epoch(args, epoch, model, train_loader, optimizer, writer)
        dev_loss, dev_l1loss, dev_time = evaluate_loss(args, epoch, model, dev_loader, writer)
        visualize(args, epoch, model, display_loader, writer)
        scheduler.step()

        is_new_best = dev_loss < best_dev_loss
        best_dev_loss = min(best_dev_loss, dev_loss)
        save_model(args, args.exp_dir, epoch, model, optimizer, best_dev_loss, is_new_best)
        logging.info(
            f'Epoch = [{epoch:4d}/{args.num_epochs:4d}] TrainL1Loss = {train_loss:.4g} '
            f'DevL1Loss = {dev_l1loss:.4g} DevLoss = {dev_loss:.4g} '
            f'TrainTime = {train_time:.4f}s DevTime = {dev_time:.4f}s',
        )
    writer.close()
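
# `build_optim` is defined elsewhere in the repository. A minimal sketch of what it is
# assumed to do (build an optimiser over the given parameters from `args.lr` and
# `args.weight_decay`); the optimiser class and hyperparameter names are assumptions,
# and this helper is illustrative only, not the repository's implementation.
def _sketch_build_optim(args, params):
    """Illustrative only: the real `build_optim` may use a different optimiser."""
    return torch.optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)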
def load_policy_model(checkpoint_file, optim=False):
    checkpoint = torch.load(checkpoint_file)
    args = checkpoint['args']
    model = build_policy_model(args)

    if not optim:
        # No gradients for this model
        for param in model.parameters():
            param.requires_grad = False

    if args.data_parallel:
        model = torch.nn.DataParallel(model)
    model.load_state_dict(checkpoint['model'])

    start_epoch = checkpoint['epoch']

    if optim:
        optimizer = build_optim(args, model.parameters())
        optimizer.load_state_dict(checkpoint['optimizer'])
        del checkpoint
        return model, args, start_epoch, optimizer

    del checkpoint
    return model, args
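
# Illustrative usage of `load_policy_model` (a sketch, not part of the original module):
# with `optim=False` it returns only `(model, args)` and freezes all parameters, which is
# the evaluation-time path. The checkpoint path is assumed to point at a file written by
# `save_policy_model`.
def _example_load_policy_for_eval(checkpoint_file):
    """Illustrative only: load a frozen policy model for evaluation."""
    model, args = load_policy_model(checkpoint_file)
    model.eval()
    return model, args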
def load_recon_model(args, optim=False):
    """
    Load the reconstruction model (and optionally its optimiser state) from
    `args.recon_model_checkpoint`.

    Returns (recon_model, recon_args, start_epoch, optimizer) when `optim=True`,
    otherwise (recon_args, recon_model) with all parameters frozen.
    """
    checkpoint = torch.load(args.recon_model_checkpoint)
    recon_args = checkpoint['args']
    recon_model = build_reconstruction_model(recon_args)

    if not optim:
        # No gradients for this model
        for param in recon_model.parameters():
            param.requires_grad = False

    if recon_args.data_parallel:  # if model was saved with data_parallel
        recon_model = torch.nn.DataParallel(recon_model)
    recon_model.load_state_dict(checkpoint['model'])

    start_epoch = checkpoint['epoch']

    if optim:
        optimizer = build_optim(args, recon_model.parameters())
        optimizer.load_state_dict(checkpoint['optimizer'])
        del checkpoint
        return recon_model, recon_args, start_epoch, optimizer

    del checkpoint
    return recon_args, recon_model
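
# Note the asymmetric return signatures above: with `optim=True` the model comes first,
# without it the checkpoint args come first. Illustrative usage for the frozen case
# (a sketch; `args` is assumed to carry `recon_model_checkpoint` as in the function above):
def _example_load_frozen_recon(args):
    """Illustrative only: load the frozen reconstruction model used for policy training."""
    recon_args, recon_model = load_recon_model(args)
    recon_model.eval()
    return recon_args, recon_model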
def train_and_eval(args, recon_args, recon_model):
    if args.resume:  # Check that this works
        resumed = True
        new_run_dir = args.policy_model_checkpoint.parent
        data_path = args.data_path
        # In case models have been moved to a different machine, make sure the path to the
        # recon model is the path provided.
        recon_model_checkpoint = args.recon_model_checkpoint

        model, args, start_epoch, optimiser = load_policy_model(
            pathlib.Path(args.policy_model_checkpoint), optim=True)

        args.old_run_dir = args.run_dir
        args.old_recon_model_checkpoint = args.recon_model_checkpoint
        args.old_data_path = args.data_path

        args.recon_model_checkpoint = recon_model_checkpoint
        args.run_dir = new_run_dir
        args.data_path = data_path
        args.resume = True
    else:
        resumed = False
        # Improvement model to train
        model = build_policy_model(args)
        # Add mask parameters for training
        args = add_mask_params(args)
        if args.data_parallel:
            model = torch.nn.DataParallel(model)
        optimiser = build_optim(args, model.parameters())
        start_epoch = 0
        # Create directory to store results in
        savestr = '{}_res{}_al{}_accel{}_k{}_{}_{}'.format(
            args.dataset, args.resolution, args.acquisition_steps, args.accelerations,
            args.num_trajectories, datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S"),
            ''.join(choice(ascii_uppercase) for _ in range(5)))
        args.run_dir = args.exp_dir / savestr
        args.run_dir.mkdir(parents=True, exist_ok=False)

    args.resumed = resumed

    if args.wandb:
        allow_val_change = args.resumed  # only allow changes if resumed: otherwise something is wrong.
        wandb.config.update(args, allow_val_change=allow_val_change)
        wandb.watch(model, log='all')

    # Logging
    logging.info(recon_model)
    logging.info(model)

    # Save arguments for bookkeeping
    args_dict = {key: str(value) for key, value in args.__dict__.items()
                 if not key.startswith('__') and not callable(key)}
    save_json(args.run_dir / 'args.json', args_dict)

    # Initialise summary writer
    writer = SummaryWriter(log_dir=args.run_dir / 'summary')

    # Parameter counting
    logging.info('Reconstruction model parameters: total {}, of which {} trainable and {} untrainable'.format(
        count_parameters(recon_model), count_trainable_parameters(recon_model),
        count_untrainable_parameters(recon_model)))
    logging.info('Policy model parameters: total {}, of which {} trainable and {} untrainable'.format(
        count_parameters(model), count_trainable_parameters(model),
        count_untrainable_parameters(model)))

    if args.scheduler_type == 'step':
        scheduler = torch.optim.lr_scheduler.StepLR(optimiser, args.lr_step_size, args.lr_gamma)
    elif args.scheduler_type == 'multistep':
        if not isinstance(args.lr_multi_step_size, list):
            args.lr_multi_step_size = [args.lr_multi_step_size]
        scheduler = torch.optim.lr_scheduler.MultiStepLR(optimiser, args.lr_multi_step_size, args.lr_gamma)
    else:
        raise ValueError("{} is not a valid scheduler choice ('step', 'multistep')".format(args.scheduler_type))

    # Create data loaders
    train_loader = create_data_loader(args, 'train', shuffle=True)
    dev_loader = create_data_loader(args, 'val', shuffle=False)

    train_data_range_dict = create_data_range_dict(args, train_loader)
    dev_data_range_dict = create_data_range_dict(args, dev_loader)

    if not args.resume:
        if args.do_train_ssim:
            do_and_log_evaluation(args, -1, recon_model, model, train_loader, writer,
                                  'Train', train_data_range_dict)
        do_and_log_evaluation(args, -1, recon_model, model, dev_loader, writer,
                              'Val', dev_data_range_dict)

    for epoch in range(start_epoch, args.num_epochs):
        train_loss, train_time = train_epoch(args, epoch, recon_model, model, train_loader,
                                             optimiser, writer, train_data_range_dict)
        logging.info(
            f'Epoch = [{epoch+1:3d}/{args.num_epochs:3d}] TrainLoss = {train_loss:.3g} '
            f'TrainTime = {train_time:.2f}s '
        )
        if args.do_train_ssim:
            do_and_log_evaluation(args, epoch, recon_model, model, train_loader, writer,
                                  'Train', train_data_range_dict)
        do_and_log_evaluation(args, epoch, recon_model, model, dev_loader, writer,
                              'Val', dev_data_range_dict)
        scheduler.step()
        save_policy_model(args, args.run_dir, epoch, model, optimiser)
    writer.close()
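
# Hypothetical entry point tying the pieces together (a minimal sketch, not part of the
# original module): load the frozen reconstruction model, optionally start a wandb run,
# then train the policy. The wandb project name is a placeholder.
def _example_main(args):
    """Illustrative only: typical call order for policy-model training."""
    recon_args, recon_model = load_recon_model(args)
    if args.wandb:
        wandb.init(project='mri-policy')  # placeholder project name
    train_and_eval(args, recon_args, recon_model)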