Example 1
def train_unet(args):
    """
    Wrapper for reconstruction (U-Net) model training.

    :param args: Arguments object, containing training hyperparameters.
    """
    args.exp_dir.mkdir(parents=True, exist_ok=True)
    writer = SummaryWriter(log_dir=args.exp_dir / 'summary')

    if args.resume:
        model, args, start_epoch, optimizer = load_recon_model(
            args.recon_model_checkpoint, optim=True)
        # best_dev_loss is not restored from the checkpoint here; reset it so
        # the first post-resume epoch is compared against a fresh baseline.
        best_dev_loss = 1e9
    else:
        model = build_reconstruction_model(args)
        if args.data_parallel:
            model = torch.nn.DataParallel(model)
        optimizer = build_optim(args, model.parameters())
        best_dev_loss = 1e9
        start_epoch = 0
    logging.info(args)
    logging.info(model)

    # Save arguments for bookkeeping
    args_dict = {
        key: str(value)
        for key, value in args.__dict__.items()
        if not key.startswith('__') and not callable(value)
    }
    save_json(args.exp_dir / 'args.json', args_dict)

    train_loader = create_data_loader(args, 'train', shuffle=True)
    dev_loader = create_data_loader(args, 'val')
    display_loader = create_data_loader(args, 'val', display=True)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_step_size,
                                                args.lr_gamma)

    for epoch in range(start_epoch, args.num_epochs):
        train_loss, train_time = train_epoch(args, epoch, model, train_loader,
                                             optimizer, writer)
        dev_loss, dev_l1loss, dev_time = evaluate_loss(args, epoch, model,
                                                       dev_loader, writer)
        visualize(args, epoch, model, display_loader, writer)
        scheduler.step()

        is_new_best = dev_loss < best_dev_loss
        best_dev_loss = min(best_dev_loss, dev_loss)
        save_model(args, args.exp_dir, epoch, model, optimizer, best_dev_loss,
                   is_new_best)
        logging.info(
            f'Epoch = [{epoch:4d}/{args.num_epochs:4d}] TrainL1Loss = {train_loss:.4g} DevL1Loss = {dev_l1loss:.4g} '
            f'DevLoss = {dev_loss:.4g} TrainTime = {train_time:.4f}s DevTime = {dev_time:.4f}s',
        )
    writer.close()
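
A minimal sketch of how this training wrapper might be invoked. The attribute names below are exactly the ones train_unet reads directly; the concrete values and the experiment path are purely illustrative, and a real run would also need whatever options the helper functions (build_reconstruction_model, create_data_loader, build_optim, ...) expect.

import argparse
import pathlib

# Hypothetical invocation sketch: only attributes read directly by train_unet
# are set here.
args = argparse.Namespace(
    exp_dir=pathlib.Path('experiments/unet_baseline'),  # illustrative path
    resume=False,
    data_parallel=False,
    num_epochs=50,
    lr_step_size=40,
    lr_gamma=0.1,
)
train_unet(args)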
Example 2
def main(base_args):
    results_dict = defaultdict(lambda: defaultdict(dict))

    runs = base_args.data_runs
    traj = base_args.num_trajectories

    for i, run_dir in enumerate(base_args.policy_model_dir_list):
        args_dict = load_json(base_args.base_policy_model_dir / run_dir / 'args.json')
        if args_dict['model_type'] == 'greedy':
            mode = 'greedy'
            label = 'greedy'
        else:
            mode = 'nongreedy'
            label = args_dict.get('gamma', None)

        sr = json.loads(args_dict['sample_rate'])
        accels = json.loads(args_dict['accelerations'])
        steps = json.loads(args_dict['acquisition_steps'])
        assert len(accels) == 1, "Using models trained with various accelerations is not supported!"
        accel = accels[0]

        for j, epoch in enumerate(base_args.epochs):
            args = copy.deepcopy(base_args)
            args.mode = mode

            if epoch != max(base_args.epochs):
                args.policy_model_checkpoint = base_args.base_policy_model_dir / run_dir / 'model_{}.pt'.format(epoch)
            else:  # Last epoch model is not always stored separately depending on logging details
                args.policy_model_checkpoint = base_args.base_policy_model_dir / run_dir / 'model.pt'

            pr_str = (f"Job {i*len(base_args.epochs)+j+1}/{len(base_args.policy_model_dir_list) * len(base_args.epochs)}"
                      f"\n   mode: {mode:>9}, accel: {accel:>2}, steps: {steps:>2}, label: {label},\n"
                      f"   ckpt: {epoch:>2}, runs: {runs:>2}, srate: {sr:>3}, traj: {traj:>2}")
            print(pr_str)

            weight_path, bias_path, param_dir = compute_gradients(args, epoch)
            snr, std = compute_snr(args, weight_path, bias_path)

            summary_dict = {'snr': str(snr),
                            'snr_std': str(std),
                            'weight_grads': str(weight_path),
                            'bias_grads': str(bias_path)}

            summary_path = param_dir / 'snr_summary.json'
            print(f"   Saving summary to {summary_path}")
            save_json(summary_path, summary_dict)

            results_dict[run_dir][f'Epoch: {epoch}'] = {'job': (mode, traj, runs, sr, accel, steps, label),
                                                        'snr': str(snr),
                                                        'snr_std': str(std)}
            print(f'SNR: {snr}, STD: {std}')

    savestr = f'{datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S")}.json'
    save_dir = pathlib.Path(os.getcwd()) / 'snr_results'
    save_dir.mkdir(parents=True, exist_ok=True)
    save_file = save_dir / savestr

    print('\nFinal results:')
    pprint(results_dict)

    print(f'\nSaving results to: {save_file}')
    save_json(save_file, results_dict)
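
The args.json files read above are produced by the bookkeeping code in the other examples, which stringifies every argument before saving; numeric and list-valued fields therefore come back as strings and are decoded with json.loads. A small sketch of that round trip, with made-up values:

import json

# Illustrative only: mimic how the bookkeeping code stringifies arguments.
args_dict = {'accelerations': str([8]),      # '[8]'
             'acquisition_steps': str(16),   # '16'
             'sample_rate': str(0.5)}        # '0.5'
accels = json.loads(args_dict['accelerations'])     # [8]
steps = json.loads(args_dict['acquisition_steps'])  # 16
sr = json.loads(args_dict['sample_rate'])           # 0.5
assert len(accels) == 1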
Example 3
def train_and_eval(args, recon_args, recon_model):
    if args.resume:
        # Check that this works
        resumed = True
        new_run_dir = args.policy_model_checkpoint.parent
        data_path = args.data_path
        # In case models have been moved to a different machine, make sure the
        # recon model path used is the one provided here rather than the one
        # stored in the checkpoint's args.
        recon_model_checkpoint = args.recon_model_checkpoint

        model, args, start_epoch, optimiser = load_policy_model(
            pathlib.Path(args.policy_model_checkpoint), optim=True)

        args.old_run_dir = args.run_dir
        args.old_recon_model_checkpoint = args.recon_model_checkpoint
        args.old_data_path = args.data_path

        args.recon_model_checkpoint = recon_model_checkpoint
        args.run_dir = new_run_dir
        args.data_path = data_path
        args.resume = True
    else:
        resumed = False
        # Improvement model to train
        model = build_policy_model(args)
        # Add mask parameters for training
        args = add_mask_params(args)
        if args.data_parallel:
            model = torch.nn.DataParallel(model)
        optimiser = build_optim(args, model.parameters())
        start_epoch = 0
        # Create directory to store results in
        savestr = '{}_res{}_al{}_accel{}_k{}_{}_{}'.format(
            args.dataset, args.resolution, args.acquisition_steps,
            args.accelerations, args.num_trajectories,
            datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S"),
            ''.join(choice(ascii_uppercase) for _ in range(5)))
        args.run_dir = args.exp_dir / savestr
        args.run_dir.mkdir(parents=True, exist_ok=False)

    args.resumed = resumed

    if args.wandb:
        allow_val_change = args.resumed  # only allow changes if resumed: otherwise something is wrong.
        wandb.config.update(args, allow_val_change=allow_val_change)
        wandb.watch(model, log='all')

    # Logging
    logging.info(recon_model)
    logging.info(model)
    # Save arguments for bookkeeping
    args_dict = {
        key: str(value)
        for key, value in args.__dict__.items()
        if not key.startswith('__') and not callable(value)
    }
    save_json(args.run_dir / 'args.json', args_dict)

    # Initialise summary writer
    writer = SummaryWriter(log_dir=args.run_dir / 'summary')

    # Parameter counting
    logging.info(
        'Reconstruction model parameters: total {}, of which {} trainable and {} untrainable'
        .format(count_parameters(recon_model),
                count_trainable_parameters(recon_model),
                count_untrainable_parameters(recon_model)))
    logging.info(
        'Policy model parameters: total {}, of which {} trainable and {} untrainable'
        .format(count_parameters(model), count_trainable_parameters(model),
                count_untrainable_parameters(model)))

    if args.scheduler_type == 'step':
        scheduler = torch.optim.lr_scheduler.StepLR(optimiser,
                                                    args.lr_step_size,
                                                    args.lr_gamma)
    elif args.scheduler_type == 'multistep':
        if not isinstance(args.lr_multi_step_size, list):
            args.lr_multi_step_size = [args.lr_multi_step_size]
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimiser, args.lr_multi_step_size, args.lr_gamma)
    else:
        raise ValueError(
            "{} is not a valid scheduler choice ('step', 'multistep')".format(
                args.scheduler_type))

    # Create data loaders
    train_loader = create_data_loader(args, 'train', shuffle=True)
    dev_loader = create_data_loader(args, 'val', shuffle=False)

    train_data_range_dict = create_data_range_dict(args, train_loader)
    dev_data_range_dict = create_data_range_dict(args, dev_loader)

    if not args.resume:
        if args.do_train_ssim:
            do_and_log_evaluation(args, -1, recon_model, model, train_loader,
                                  writer, 'Train', train_data_range_dict)
        do_and_log_evaluation(args, -1, recon_model, model, dev_loader, writer,
                              'Val', dev_data_range_dict)

    for epoch in range(start_epoch, args.num_epochs):
        train_loss, train_time = train_epoch(args, epoch, recon_model, model,
                                             train_loader, optimiser, writer,
                                             train_data_range_dict)
        logging.info(
            f'Epoch = [{epoch+1:3d}/{args.num_epochs:3d}] TrainLoss = {train_loss:.3g} TrainTime = {train_time:.2f}s '
        )

        if args.do_train_ssim:
            do_and_log_evaluation(args, epoch, recon_model, model,
                                  train_loader, writer, 'Train',
                                  train_data_range_dict)
        do_and_log_evaluation(args, epoch, recon_model, model, dev_loader,
                              writer, 'Val', dev_data_range_dict)

        scheduler.step()
        save_policy_model(args, args.run_dir, epoch, model, optimiser)
    writer.close()
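
For reference, a minimal standalone sketch of the two scheduler choices handled above, using a throwaway optimiser; the step size, milestones, and gamma below are illustrative stand-ins for args.lr_step_size, args.lr_multi_step_size, and args.lr_gamma.

import torch

params = [torch.nn.Parameter(torch.zeros(1))]
optimiser = torch.optim.SGD(params, lr=0.1)

# 'step': multiply the learning rate by gamma every step_size epochs.
step_scheduler = torch.optim.lr_scheduler.StepLR(
    optimiser, step_size=40, gamma=0.1)

# 'multistep': multiply the learning rate by gamma at each listed epoch milestone.
multistep_scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimiser, milestones=[20, 40], gamma=0.1)

# In either case the training loop calls scheduler.step() once per epoch.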
Example 4
def main(args):
    """
    Wrapper for running baseline models.

    :param args: Arguments object containing hyperparameters for baseline models.
    """

    # For consistency
    args.val_batch_size = args.batch_size
    # Reconstruction model
    recon_args, recon_model = load_recon_model(args)
    # Add mask parameters for training
    args = add_mask_params(args)

    # Create directory to store results in
    savestr = '{}_res{}_al{}_accel{}_{}_{}_{}'.format(
        args.dataset, args.resolution, args.acquisition_steps,
        args.accelerations, args.model_type,
        datetime.datetime.now().strftime("%Y-%m-%d_%H:%M:%S"),
        ''.join(choice(ascii_uppercase) for _ in range(5)))
    args.run_dir = args.exp_dir / savestr
    args.run_dir.mkdir(parents=True, exist_ok=False)

    if args.wandb:
        wandb.config.update(args)

    # Logging
    logging.info(args)
    logging.info(recon_model)
    logging.info('Model type: {}'.format(args.model_type))

    # Save arguments for bookkeeping
    args_dict = {
        key: str(value)
        for key, value in args.__dict__.items()
        if not key.startswith('__') and not callable(value)
    }
    save_json(args.run_dir / 'args.json', args_dict)

    # Initialise summary writer
    writer = SummaryWriter(log_dir=args.run_dir / 'summary')

    if args.model_type == 'average_oracle':
        baseline_ssims, baseline_psnrs, baseline_time = run_average_oracle(
            args, recon_model)
    else:
        # Create data loader
        loader = create_data_loader(args, args.partition)
        data_range_dict = create_data_range_dict(args, loader)
        baseline_ssims, baseline_psnrs, baseline_time = run_baseline(
            args, recon_model, loader, data_range_dict)

    # Logging
    ssims_str = ", ".join(
        ["{}: {:.4f}".format(i, l) for i, l in enumerate(baseline_ssims)])
    psnrs_str = ", ".join(
        ["{}: {:.4f}".format(i, l) for i, l in enumerate(baseline_psnrs)])
    logging.info(f'  SSIM = [{ssims_str}]')
    logging.info(f'  PSNR = [{psnrs_str}]')
    logging.info(f'  Time = {baseline_time:.2f}s')

    # For storing in wandb
    if args.wandb:
        for epoch in range(args.num_epochs + 1):
            wandb.log(
                {
                    f'{args.partition}_ssims':
                    {str(key): val
                     for key, val in enumerate(baseline_ssims)}
                },
                step=epoch)
            wandb.log(
                {
                    f'{args.partition}_psnrs':
                    {str(key): val
                     for key, val in enumerate(baseline_psnrs)}
                },
                step=epoch)

    writer.close()
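
A minimal sketch of the payload shape built by the logging loop above; the SSIM values are made up and 'val' stands in for args.partition. Each metric is a nested dict keyed by acquisition step index.

# Illustrative only: the shape of one wandb.log payload from the loop above.
baseline_ssims = [0.71, 0.74, 0.78]  # made-up metric values
partition = 'val'                    # stands in for args.partition
payload = {f'{partition}_ssims':
           {str(key): val for key, val in enumerate(baseline_ssims)}}
# payload == {'val_ssims': {'0': 0.71, '1': 0.74, '2': 0.78}}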