def main(args):
    print('creating data loaders...')
    data_loader = create_data_loaders(args)
    print('loading model...')
    checkpoint, model = load_model(args.checkpoint)
    print('running model...')
    reconstructions = run(args, model, data_loader)
    save_reconstructions(reconstructions, args.out_dir)
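Every example below ends by calling save_reconstructions, which is not shown in any of the snippets. A minimal sketch of that helper, assuming the fastMRI convention of writing one HDF5 file per volume with a 'reconstruction' dataset:

import pathlib

import h5py


def save_reconstructions(reconstructions, out_dir):
    """Write each volume to <out_dir>/<fname> as an HDF5 'reconstruction' dataset."""
    out_dir = pathlib.Path(out_dir)
    out_dir.mkdir(parents=True, exist_ok=True)
    for fname, recons in reconstructions.items():
        with h5py.File(out_dir / fname, 'w') as hf:
            hf.create_dataset('reconstruction', data=recons)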
Example #2
def run_sn(args, data_loader, model):
    """ Run Sigmanet """
    model.eval()
    logging.info(f'Run Sigmanet reconstruction')
    logging.info(f'Arguments: {args}')
    reconstructions = defaultdict(list)
    # keys = ['input', 'kspace', 'smaps', 'mask', 'fg_mask']
    # if args.mask_bg:
    #     keys.append('input_rss_mean')
    # attr_keys = ['mean', 'cov', 'norm']

    with torch.no_grad():
        for ii, sample in enumerate(tqdm(iter(data_loader))):
            sample = data_batch._read_data(sample, device=args.device)

            rec_x = sample['attrs']['metadata']['rec_x']
            rec_y = sample['attrs']['metadata']['rec_y']

            x = model(sample['input'], sample['kspace'], sample['smaps'],
                      sample['mask'], sample['attrs'])

            recons = postprocess(x, (rec_x, rec_y))

            # mask background using background mean value
            if args.mask_bg:
                fg_mask = center_crop(
                    sample['fg_mask'],
                    (rec_x, rec_y),
                ).squeeze(1)
                if args.use_bg_noise_mean:
                    bg_mean = sample['input_rss_mean'].reshape(-1, 1, 1)
                    recons = recons * fg_mask + (1 - fg_mask) * bg_mean
                else:
                    recons = recons * fg_mask

            # renormalize
            norm = sample['attrs']['norm'].reshape(-1, 1, 1)
            recons = recons * norm

            recons = recons.to('cpu').numpy()

            if args.debug and ii % 10 == 0:
                plt.imsave(
                    'run_sn_progress.png',
                    np.hstack(recons),
                    cmap='gray',
                )

            for bidx in range(recons.shape[0]):
                reconstructions[sample['fname']].append(
                    (sample['slidx'][bidx], recons[bidx]))

    reconstructions = {
        fname: np.stack([pred for _, pred in sorted(slice_preds)])
        for fname, slice_preds in reconstructions.items()
    }

    save_reconstructions(reconstructions, args.out_dir)
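run_sn leans on a center_crop helper (alongside postprocess) that is defined elsewhere in the codebase. A minimal sketch of center_crop, assuming it follows the fastMRI convention of symmetrically cropping the last two dimensions:

def center_crop(data, shape):
    """Crop the last two dimensions of data to shape, centered."""
    assert 0 < shape[0] <= data.shape[-2]
    assert 0 < shape[1] <= data.shape[-1]
    w_from = (data.shape[-2] - shape[0]) // 2
    h_from = (data.shape[-1] - shape[1]) // 2
    return data[..., w_from:w_from + shape[0], h_from:h_from + shape[1]]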
Example #3
def save_outputs(outputs, output_path):
    reconstructions = defaultdict(list)
    for fname, slice, pred in outputs:
        reconstructions[fname].append((slice, pred))
    reconstructions = {
        fname: np.stack([pred for _, pred in sorted(slice_preds)])
        for fname, slice_preds in reconstructions.items()
    }
    utils.save_reconstructions(reconstructions, output_path)
Example #4
def reconstructe():
    args = create_arg_parser().parse_args(sys.argv[1:])
    args.checkpoint = f'summary/{args.test_name}/best_model.pt'
    args.out_dir = f'summary/{args.test_name}/rec'

    data_loader = create_data_loaders(args)
    model = load_model(args.checkpoint)
    reconstructions = eval(args, model, data_loader)
    save_reconstructions(reconstructions, args.out_dir)
def test_end(self, test_logs):
    outputs = defaultdict(list)
    for log in test_logs:
        for i, (fname, slice) in enumerate(zip(log['fname'], log['slice'])):
            outputs[fname].append((slice, log['output'][i]))
    for fname in outputs:
        outputs[fname] = np.stack([out for _, out in sorted(outputs[fname])])
    save_reconstructions(outputs, self.hparams.exp_dir / self.hparams.exp / 'reconstructions')
    return dict()
def run_unet(wl=False, acc=None, cf=None, device=1):
    args = create_arg_parser(device).parse_args(sys.argv[1:])
    if acc: args.accelerations = acc
    if cf: args.center_fractions = cf
    if wl:
        args.out_dir = pathlib.Path(f'{device}rec_with')

    args.checkpoint = pathlib.Path(f'{acc[0]}_{cf[0]}_{wl}/best_model.pt')
    data_loader = create_data_loaders(args)
    model = load_model(args.checkpoint)
    reconstructions = run_unet2(args, model, data_loader)
    save_reconstructions(reconstructions, args.out_dir)
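run_unet expects the acceleration and center-fraction settings as lists (note the acc[0] and cf[0] indexing when building the checkpoint path). A hypothetical invocation with purely illustrative values:

if __name__ == '__main__':
    # illustrative acceleration/center-fraction pair; real values depend on the experiment
    run_unet(wl=True, acc=[4], cf=[0.08], device=0)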
Example #7
def reconstructe():
    args = create_arg_parser().parse_args(sys.argv[1:])
    args.checkpoint = f'summary/{args.test_name}/model.pt'
    args.out_dir = f'summary/{args.test_name}/rec'

    data_loader = create_data_loaders(args)
    model = load_model(args.checkpoint)
    model.eval()
    reconstructions = eval(args, model, data_loader)
    save_reconstructions(reconstructions, args.out_dir)
    corrupted = corrupted_outputs(args, model, data_loader)
    save_reconstructions(corrupted, f'summary/{args.test_name}/corrupted')

    x = model.get_trajectory()
    x = x.detach().cpu().numpy()
    sio.savemat(f'summary/{args.test_name}/traj.mat', {'x': x})
Example #8
def run_zero_filled_sense(args, data_loader):
    """ Run Adjoint (zero-filled SENSE) reconstruction """
    logging.info('Run zero-filled SENSE reconstruction')
    logging.info(f'Arguments: {args}')
    reconstructions = defaultdict(list)

    with torch.no_grad():
        for sample in tqdm(iter(data_loader)):
            sample = data_batch._read_data(sample)

            rec_x = sample['attrs']['metadata']['rec_x']
            rec_y = sample['attrs']['metadata']['rec_y']

            x = sample['input']

            recons = postprocess(x, (rec_x, rec_y))

            # mask background using background mean value
            if args.mask_bg:
                fg_mask = center_crop(
                    sample['fg_mask'],
                    (rec_x, rec_y),
                ).squeeze(1)
                if args.use_bg_noise_mean:
                    bg_mean = sample['input_rss_mean'].reshape(-1, 1, 1)
                    recons = recons * fg_mask + (1 - fg_mask) * bg_mean
                else:
                    recons = recons * fg_mask

            # renormalize
            norm = sample['attrs']['norm'].numpy()[:, np.newaxis, np.newaxis]
            recons = recons.numpy() * norm

            for bidx in range(recons.shape[0]):
                reconstructions[sample['fname']].append(
                    (sample['slidx'][bidx], recons[bidx]))

    reconstructions = {
        fname: np.stack([pred for _, pred in sorted(slice_preds)])
        for fname, slice_preds in reconstructions.items()
    }

    save_reconstructions(reconstructions, args.out_dir)
Example #9
def save_zero_filled(data_dir, out_dir, which_challenge, resolution):
    reconstructions = {}

    for file in data_dir.iterdir():
        print("file:{}".format(file))
        with h5py.File(file, "r") as hf:
            masked_kspace = transforms.to_tensor(hf['kspace'][()])
            # Inverse Fourier Transform to get zero filled solution
            image = transforms.ifft2(masked_kspace)
            # Crop input image
            smallest_width = min(resolution, image.shape[-2])
            smallest_height = min(resolution, image.shape[-3])
            image = transforms.complex_center_crop(image, (smallest_height, smallest_width))
            # Absolute value
            image = transforms.complex_abs(image)
            # Apply Root-Sum-of-Squares if multicoil data
            if which_challenge == 'multicoil':
                image = transforms.root_sum_of_squares(image, dim=1)

            reconstructions[file.name] = image
    save_reconstructions(reconstructions, out_dir)
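save_zero_filled depends on a few transforms (ifft2, complex_abs, root_sum_of_squares) that are imported from elsewhere. Minimal sketches of the last two, assuming the usual fastMRI layout in which the final dimension holds the real and imaginary parts:

import torch


def complex_abs(data):
    """Magnitude of a complex tensor stored as (..., 2) real/imaginary pairs."""
    assert data.shape[-1] == 2
    return (data ** 2).sum(dim=-1).sqrt()


def root_sum_of_squares(data, dim=0):
    """Combine coil images with a root-sum-of-squares along dim."""
    return torch.sqrt((data ** 2).sum(dim))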
def main(args):
    data_loader = create_data_loaders(args)
    model = load_model(args.checkpoint)
    reconstructions = run_unet(args, model, data_loader)
    save_reconstructions(reconstructions, args.out_dir)
Example #11
def main(args):
    args.exp_dir.mkdir(parents=True, exist_ok=True)
    writer = SummaryWriter(log_dir=str(args.exp_dir / 'summary'))

    resume_flag = 0
    if args.resume:
        resume_flag = 1
        checkpoint, model, optimizer = load_model(args.checkpoint)
        args = checkpoint['args']
        best_dev_loss = checkpoint['best_dev_loss']
        start_epoch = checkpoint['epoch']
        del checkpoint
    else:
        model = build_model(args)
        if args.data_parallel:
            model = torch.nn.DataParallel(model)
        optimizer = build_optim(args, model.parameters())
        best_dev_loss = 1e9
        start_epoch = 0
    logging.info(args)
    logging.info(model)

    train_loader, dev_loader, display_loader = create_data_loaders(args)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_step_size,
                                                args.lr_gamma)

    print('----------Start Training----------')
    print('resume_flag is', resume_flag)
    if resume_flag == 0:
        print('Training from scratch, so the previous results file is deleted')
        # guard against the file not existing on the first run
        if os.path.exists('./models/unet/results.txt'):
            os.remove('./models/unet/results.txt')
    f = open('./models/unet/results.txt', 'a+')
    for epoch in range(start_epoch, args.num_epochs):
        scheduler.step(epoch)
        train_loss, train_time = train_epoch(args, epoch, model, train_loader,
                                             optimizer, writer)
        dev_loss, dev_time = evaluate(args, epoch, model, dev_loader, writer)
        visualize(args, epoch, model, display_loader, writer)

        is_new_best = dev_loss < best_dev_loss
        best_dev_loss = min(best_dev_loss, dev_loss)
        save_model(args, args.exp_dir, epoch, model, optimizer, best_dev_loss,
                   is_new_best)
        print('Epoch: %d / %d' % (epoch, args.num_epochs), end=',  ')
        print('TrainLoss: %.4g' % train_loss, end=',  ')
        print('DevLoss: %.4g' % dev_loss, end=',  ')
        print('TrainTime: %.4fs' % float(train_time), end=',  ')
        print('DevTime: %.4fs' % float(dev_time))

        # get metrics
        if epoch % args.metric_interval == 0:
            #             data_loader_metrics = create_data_loaders_metrics(args)
            #             a = 10
            #             print('Now stop %ds temporarily'%a)
            #             time.sleep(a)
            print('Now start reconstructing!')
            model_metrics = load_model_metrics(args.checkpoint_metrics)
            reconstructions = run_unet(args, model_metrics, dev_loader)
            save_reconstructions(reconstructions, args.out_dir)
            recons_key = 'reconstruction_esc' if args.challenge == 'singlecoil' else 'reconstruction_rss'
            metrics = evaluate_metrics(args, recons_key)
            print(metrics)
            f.write('Epoch:{epoch}'.format(epoch=epoch) + '  ' + str(metrics) +
                    '\n')

    f.close()
    writer.close()
Example #12
def main(args):
    args.exp_dir.mkdir(parents=True, exist_ok=True)
    writer = SummaryWriter(log_dir=args.exp_dir / 'summary')

    module = __import__('models', fromlist=[args.model])
    mri_model = importlib.import_module('.'.join(['models', args.model, 'train']))

    if args.resume:
        checkpoint, model, optimizer = load_model(args.checkpoint, mri_model, args.num_volumes)
        # args = checkpoint['args']
        best_dev_loss = checkpoint['best_dev_loss']
        start_epoch = checkpoint['epoch']
        del checkpoint
    else:
        model = build_model(args)
        if args.data_parallel:
            model = torch.nn.DataParallel(model)
        optimizer = build_optim(args, model.parameters())
        best_dev_loss = 1e9
        start_epoch = 0
    logging.info(args)
    logging.info(model)
    
    loaders = create_data_loaders(args)
    if args.eval:
        train_loader, dev_loader, display_loader, test_loader = loaders
    else:
        train_loader, dev_loader, display_loader = loaders


    module = __import__('models.{}.train'.format(args.model), fromlist=[''])
    inference_func = module.inference
    if args.eval:
        reconstructions = run_model(args, model, inference_func, test_loader)
        save_reconstructions(reconstructions, pathlib.Path(args.out_dir + 'reconstructions_test/'))
        reconstructions = run_model(args, model, inference_func, dev_loader)
        save_reconstructions(reconstructions, pathlib.Path(args.out_dir + 'reconstructions_val/'))
        # reconstructions = run_model(args, model, inference_func, train_loader)
        # save_reconstructions(reconstructions, pathlib.Path(args.out_dir + 'reconstructions_train/'))

        create_submission_file(args.out_dir + 'submission.json', 'singlecoil', '', "VNet", args.exp_dir.name.split('/')[-1], True)
        return
    elif args.evaluate_only:
        dev_loss, dev_time = evaluate(args.device, args.model, model, inference_func, dev_loader, batches_per_volume=args.batches_per_volume, epoch=0, writer=None)
        writer.close()
        return
    else:
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_step_size, args.lr_gamma)
        train_step_func = module.train_step
        

        for epoch in range(start_epoch, args.num_epochs):
            scheduler.step(epoch)
            train_loss, train_time = train_epoch(args, epoch, model, train_step_func, train_loader, optimizer, writer)
            dev_loss, dev_time = evaluate(args.device, args.model, model, inference_func, dev_loader,
                                          batches_per_volume=args.batches_per_volume, epoch=epoch, writer=None)
            if epoch % 10 == 0:
                visualize(args, epoch, model, inference_func, display_loader, writer)

            is_new_best = dev_loss < best_dev_loss
            best_dev_loss = min(best_dev_loss, dev_loss)
            save_model(args, args.exp_dir, epoch, model, optimizer, best_dev_loss, is_new_best)
            logging.info(
                f'Epoch = [{epoch:4d}/{args.num_epochs:4d}] TrainLoss = {train_loss:.4g} '
                f'DevLoss = {dev_loss:.4g} TrainTime = {train_time:.4f}s DevTime = {dev_time:.4f}s',
            )
        writer.close()
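Example #12 resolves the model package dynamically, so each model is expected to live in models/<name>/train.py and expose at least train_step and inference (see the module.train_step and module.inference lookups above). A hypothetical skeleton of such a module, only to make the expected interface explicit; the real signatures depend on how train_epoch and evaluate call these functions:

# models/mymodel/train.py (hypothetical skeleton)
def train_step(args, model, batch, optimizer):
    """One optimization step; returns the training loss for logging."""
    ...


def inference(args, model, batch):
    """Run the model on one batch and return the reconstruction."""
    ...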