Example #1
    params = xutils.Params(json_path)

    # set the three slices
    args.three_slices = not args.no_three_slices

    # set up dataset and DataLoader
    logging.info("Setting up data loaders...")
    dataloaders = {}

    # training dataset
    train_dataset = CardiacMR_2D_UKBB(params.train_data_path,
                                      seq=params.seq,
                                      seq_length=params.seq_length,
                                      augment=params.augment,
                                      transform=transforms.Compose([
                                          CenterCrop(params.crop_size),
                                          Normalise(),
                                          ToTensor()
                                      ]))
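    # (the Compose transforms apply in order: spatial crop, then intensity
    #  normalisation, then conversion to a torch.Tensor)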
    # training dataloader
    dataloaders['train'] = DataLoader(train_dataset,
                                      batch_size=params.batch_size,
                                      shuffle=False,
                                      num_workers=args.num_workers,
                                      pin_memory=args.cuda)
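    # (note: shuffle is left off here, presumably because temporal order matters
    #  for sequence data; pin_memory speeds up host-to-GPU copies when CUDA is in use)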

    # validation dataset
    val_dataset = CardiacMR_2D_Eval_UKBB(params.val_data_path,
                                         seq=params.seq,
                                         augment=params.augment,
                                         label_prefix=params.label_prefix,
Example #2
logging.info('Looping over subjects...')

with tqdm(total=len(os.listdir(data_dir))) as t:
    # loop over subjects
    for subj_id in sorted(os.listdir(data_dir)):
        subj_dir = os.path.join(data_dir, subj_id)
        subj_id_buffer += [subj_id]

        # load in the ED and ES segmentation masks
        nseg_ED = nib.load(os.path.join(subj_dir, '{0}_sa_ED.nii.gz'.format(label_prefix)))
        nseg_ES = nib.load(os.path.join(subj_dir, '{0}_sa_ES.nii.gz'.format(label_prefix)))
        seg_ED = np.asanyarray(nseg_ED.dataobj)  # nibabel's get_data() is deprecated
        seg_ES = np.asanyarray(nseg_ES.dataobj)

        # cropping
        cropper = CenterCrop(output_size=args.crop_size)
        seg_ED_crop = cropper(seg_ED.transpose(2, 0, 1)).transpose(1, 2, 0)
        seg_ES_crop = cropper(seg_ES.transpose(2, 0, 1)).transpose(1, 2, 0)
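        # (transpose (H, W, N) -> (N, H, W) so the cropper sees the slice axis
        #  first, then transpose back to (H, W, N))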

        # three slices
        num_slices = seg_ED.shape[-1]
        slices_idx = np.arange(0, num_slices)
        if args.three_slices:
            apical_idx = int(round((num_slices - 1) * 0.75))  # 75% from basal
            mid_ven_idx = int(round((num_slices - 1) * 0.5))  # 50% from basal
            basal_idx = int(round((num_slices - 1) * 0.25))  # 25% from basal
            slices_idx = [apical_idx, mid_ven_idx, basal_idx]

            seg_ED_crop = seg_ED_crop[:, :, slices_idx]
            seg_ES_crop = seg_ES_crop[:, :, slices_idx]
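
            # worked example: with num_slices = 10,
            #   apical_idx  = int(round(9 * 0.75)) = 7
            #   mid_ven_idx = int(round(9 * 0.5))  = 4  (Python 3 rounds ties to even)
            #   basal_idx   = int(round(9 * 0.25)) = 2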
Example #3
def inference(model, subject_data_dir, eval_data, subject_output_dir, args, params):
    """
    Run inference on one subject.

    Args:
        model: (object) instantiated model
        subject_data_dir: (string) directory of the subject's data, absolute path
        eval_data: (dict) ED and ES images and labels to evaluate metrics
        subject_output_dir: (string) save results of the subject to this dir
        args
        params

    """
    # set model to evaluation mode
    model.eval()

    # send model to the right device
    model = model.to(device=args.device)  # nn.Module.to() moves parameters and buffers in-place

    # --- run inference on the whole sequence --- #
    # create a dataloader to load data of one subject
    inference_dataset = CardiacMR_2D_Inference_UKBB(subject_data_dir,
                                                    seq=params.seq,
                                                    transform=transforms.Compose([
                                                        CenterCrop(params.crop_size),
                                                        Normalise(),
                                                        ToTensor()])
                                                    )
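    # (each item yielded by the dataset is a (target, source) pair of slice stacks
    #  from two consecutive time frames of the sequence; see Example #4)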

    # loop over time frames
    logging.info("Running inference calculation...")
    op_flow_list = []
    target_list = []
    source_list = []
    warped_source_list = []
    for (target, source) in inference_dataset:
        # size (N, 1, H, W) to input model
        target = target.unsqueeze(1).to(device=args.device)
        source = source.unsqueeze(1).to(device=args.device)

        # run inference
        op_flow = model(target, source)
        warped_source = resample_transform(source, op_flow)

        # move to cpu and stack
        op_flow_list += [op_flow.data.cpu().numpy().transpose(0, 2, 3, 1)]  # (N, H, W, 2)
        target_list += [target.data.squeeze(1).cpu().numpy() * 255]  # (N, H, W), N = number of slices
        source_list += [source.data.squeeze(1).cpu().numpy() * 255]  # (N, H, W)
        warped_source_list += [warped_source.data.squeeze(1).cpu().numpy() * 255]  # (N, H, W)
    logging.info("- Done.")

    # stack on time as dim 0, shape (T, N, H, W)
    op_flow_seq = np.stack(op_flow_list, axis=0)
    target_seq = np.stack(target_list, axis=0)
    source_seq = np.stack(source_list, axis=0)
    warped_source_seq = np.stack(warped_source_list, axis=0)

    # save the flow and target sequence to an HDF5 file for later use
    h5py_file_path = os.path.join(subject_output_dir, 'save_data.hdf5')
    if os.path.exists(h5py_file_path):
        os.remove(h5py_file_path)
    with h5py.File(h5py_file_path, "w") as f:
        f.create_dataset('op_flow_seq', data=op_flow_seq)
        f.create_dataset('target_seq', data=target_seq)
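
    # the saved file can be read back later, e.g.:
    #   with h5py.File(h5py_file_path, "r") as f:
    #       op_flow_seq = f["op_flow_seq"][()]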

    num_slices = op_flow_seq.shape[1]
    if args.three_slices:
        apical_idx = int(round((num_slices - 1) * 0.75))  # 75% from basal
        mid_ven_idx = int(round((num_slices - 1) * 0.5))  # 50% from basal
        basal_idx = int(round((num_slices - 1) * 0.25))  # 25% from basal
        slices_idx = [apical_idx, mid_ven_idx, basal_idx]
    else:
        slices_idx = np.arange(0, num_slices)

    # loop over slices
    for slice_num in slices_idx:
        logging.info("Saving results of slice no. {}".format(slice_num))
        # shape (T, H, W) or (T, H, W, 2)
        op_flow_slice_seq = op_flow_seq[:, slice_num, :, :]
        target_slice_seq = target_seq[:, slice_num, :, :]
        source_slice_seq = source_seq[:, slice_num, :, :]
        warped_source_slice_seq = warped_source_seq[:, slice_num, :, :]

        # set up saving directory
        output_dir_slice = os.path.join(subject_output_dir, 'slice_{}'.format(slice_num))
        if not os.path.exists(output_dir_slice):
            os.makedirs(output_dir_slice)

        # loop over time frame
        png_buffer = []
        for fr in range(op_flow_slice_seq.shape[0]):
            print('Frame: {}/{}'.format(fr, op_flow_slice_seq.shape[0]))
            op_flow_fr = op_flow_slice_seq[fr, :, :, :]
            target_fr = target_slice_seq[fr, :, :]
            source_fr = source_slice_seq[fr, :, :]
            warped_source_fr = warped_source_slice_seq[fr, :, :]

            fig_save_path = os.path.join(output_dir_slice, 'frame_{}.png'.format(fr))
            plot_results(target_fr, source_fr, warped_source_fr, op_flow_fr, save_path=fig_save_path)

            # read back the PNG to save a GIF animation
            png_buffer += [imageio.imread(fig_save_path)]
        imageio.mimwrite(os.path.join(output_dir_slice, 'results.gif'), png_buffer, fps=params.fps)
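        # (note: newer imageio releases deprecate the `fps` keyword for GIFs in
        #  favour of `duration` in milliseconds per frame, i.e. duration=1000/fps)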

        # flow_utils.save_warp_n_error(warped_source_slice_seq, target_slice_seq, source_slice_seq, output_dir_slice, fps=params.fps)
        # if args.hsv_flow:
        #     flow_utils.save_flow_hsv(op_flow_slice_seq, target_slice_seq, output_dir_slice, fps=params.fps)
        # if args.quiver:
        #     flow_utils.save_flow_quiver(op_flow_slice_seq * (params.crop_size / 2), source_slice_seq, output_dir_slice, fps=params.fps)

    if args.metrics:
        # --- evaluate motion estimation accuracy metrics ---  #
        # unpack the ED ES data Tensor inputs, transpose from (1, N, H, W) to (N, 1, H, W)
        image_ed_batch = eval_data['image_ed_batch'].permute(1, 0, 2, 3).to(device=args.device)
        image_es_batch = eval_data['image_es_batch'].permute(1, 0, 2, 3).to(device=args.device)
        label_es_batch = eval_data['label_es_batch'].permute(1, 0, 2, 3).to(device=args.device)

        # compute optical flow and warped ed images using the trained model(source, target)
        op_flow = model(image_ed_batch, image_es_batch)

        # warp ED segmentation mask to ES using nearest neighbourhood interpolation
        with torch.no_grad():
            warped_label_es_batch = resample_transform(label_es_batch.float(), op_flow, interp='nearest')
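        # (nearest-neighbour interpolation keeps the warped mask in the original
        #  integer label set rather than blending class values)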

        # move data to cpu to calculate metrics (also transpose into H, W, N)
        warped_label_es_batch = warped_label_es_batch.squeeze(1).cpu().numpy().transpose(1, 2, 0)
        label_es_batch = label_es_batch.squeeze(1).cpu().numpy().transpose(1, 2, 0)
        label_ed_batch = eval_data['label_ed_batch'].squeeze(0).numpy().transpose(1, 2, 0)

        # calculate contour distance metrics, metrics functions take inputs shaped in (H, W, N)
        mcd_lv, hd_lv = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=1, dx=params.pixel_size)
        mcd_myo, hd_myo = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=2, dx=params.pixel_size)
        mcd_rv, hd_rv = contour_distances_stack(warped_label_es_batch, label_ed_batch, label_class=3, dx=params.pixel_size)
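        # (label classes 1, 2 and 3 correspond to LV, myocardium and RV
        #  respectively, as reflected in the metric variable names)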

        metrics = dict()
        metrics['mcd_lv'] = mcd_lv
        metrics['hd_lv'] = hd_lv
        metrics['mcd_myo'] = mcd_myo
        metrics['hd_myo'] = hd_myo
        metrics['mcd_rv'] = mcd_rv
        metrics['hd_rv'] = hd_rv

        # save the metrics to a JSON file
        metrics_save_path = os.path.join(subject_output_dir, 'metrics.json')
        xutils.save_dict_to_json(metrics, metrics_save_path)

        if args.nifti:
            # save warped ES segmentations and the original (but cropped) ED segmentation as NIfTIs
            nim = nib.load(os.path.join(subject_data_dir, 'label_sa_ED.nii.gz'))
            nim_warped_label_es = nib.Nifti1Image(warped_label_es_batch, nim.affine, nim.header)
            nib.save(nim_warped_label_es, os.path.join(subject_output_dir, 'warped_label_ES.nii.gz'))
            nim_label_ed = nib.Nifti1Image(label_ed_batch, nim.affine, nim.header)
            nib.save(nim_label_ed, os.path.join(subject_output_dir, 'label_ED.nii.gz'))
            nim_label_es = nib.Nifti1Image(label_es_batch, nim.affine, nim.header)
            nib.save(nim_label_es, os.path.join(subject_output_dir, 'label_ES.nii.gz'))
Example #4
def inference(model, subject_data_dir, eval_data, subject_output_dir, args,
              params):
    """
    Run inference on one subject sequence

    Args:
        model: (object) instantiated model
        subject_data_dir: (string) directory of the subject's data, absolute path
        eval_data: (dict) ED and ES images and labels to evaluate metrics
        subject_output_dir: (string) save results of the subject to this dir
        args
        params

    """
    # dataloader for one subject that loads volume pairs of two consecutive frames in a sequence
    inference_dataset = CardiacMR_2D_Inference_UKBB(
        subject_data_dir,
        seq=params.seq,
        transform=transforms.Compose(
            [CenterCrop(params.crop_size),
             Normalise(),
             ToTensor()]))

    logging.info("Running inference computation...")

    dvf_buffer = []
    target_buffer = []
    source_buffer = []
    warped_source_buffer = []

    # loop over time frames
    for (target, source) in inference_dataset:
        # size (N, 1, H, W) to input model
        target = target.unsqueeze(1).to(device=args.device)
        source = source.unsqueeze(1).to(device=args.device)

        # run inference
        dvf = model(target, source)
        warped_source = resample_transform(source, dvf)

        # move to cpu & add to buffer, N = #slices
        dvf_buffer += [dvf.data.cpu().numpy().transpose(0, 2, 3, 1)]  # (N, H, W, 2)
        target_buffer += [target.data.squeeze(1).cpu().numpy()]  # (N, H, W)
        source_buffer += [source.data.squeeze(1).cpu().numpy()]  # (N, H, W)
        warped_source_buffer += [warped_source.data.squeeze(1).cpu().numpy()]  # (N, H, W)

    logging.info("- Done.")

    # stack on time dimension (0) => (T, N, H, W)
    dvf_seq = np.stack(dvf_buffer, axis=0)  # (T, N, H, W, 2)
    target_seq = np.stack(target_buffer, axis=0)
    source_seq = np.stack(source_buffer, axis=0)
    warped_source_seq = np.stack(warped_source_buffer, axis=0)
    """ Save output transformation and images """
    # (optional) extract 3 slices
    num_slices = dvf_seq.shape[1]
    if not args.all_slices:
        apical_idx = int(round((num_slices - 1) * 0.75))  # 75% from basal
        mid_ven_idx = int(round((num_slices - 1) * 0.5))  # 50% from basal
        basal_idx = int(round((num_slices - 1) * 0.25))  # 25% from basal
        slices_idx = [apical_idx, mid_ven_idx, basal_idx]
    else:
        slices_idx = np.arange(0, num_slices)

    # save DVF and image sequences (original and warped)
    source_save = source_seq.transpose(2, 3, 1, 0)[..., slices_idx, :]  # (H, W, _N, T)
    warped_source_save = warped_source_seq.transpose(2, 3, 1, 0)[..., slices_idx, :]  # (H, W, _N, T)
    dvf_save = dvf_seq.transpose(2, 3, 1, 4, 0)[..., slices_idx, :, :]  # (H, W, _N, 2, T)

    # un-normalise DVF to image pixel space
    dvf_save[..., 0, :] *= dvf_save.shape[0] / 2
    dvf_save[..., 1, :] *= dvf_save.shape[1] / 2
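    # (assumption: the DVF is in the normalised [-1, 1] coordinate convention of
    #  torch.nn.functional.grid_sample, so scaling by half the image size along
    #  each axis converts displacements to pixels)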

    # (note: identity image-to-world affine)
    nib.save(nib.Nifti1Image(source_save, np.eye(4)),
             f"{subject_output_dir}/{params.seq}.nii.gz")
    nib.save(nib.Nifti1Image(warped_source_save, np.eye(4)),
             f"{subject_output_dir}/warped_{params.seq}.nii.gz")
    nib.save(nib.Nifti1Image(dvf_save, np.eye(4)),
             f"{subject_output_dir}/{params.seq}_dvf.nii.gz")
    """"""
    """ 
    Save visual output 
    """
    if args.visual_output:
        logging.info("Saving visual outputs (WARNING: this process is slow...")

        # loop over slices
        for slice_num in slices_idx:
            logging.info("Saving results of slice no. {}".format(slice_num))
            # shape (T, H, W) or (T, H, W, 2)
            dvf_slice_seq = dvf_seq[:, slice_num, :, :]
            target_slice_seq = target_seq[:, slice_num, :, :]
            source_slice_seq = source_seq[:, slice_num, :, :]
            warped_source_slice_seq = warped_source_seq[:, slice_num, :, :]

            # set up saving directory
            output_dir_slice = os.path.join(subject_output_dir,
                                            'slice_{}'.format(slice_num))
            if not os.path.exists(output_dir_slice):
                os.makedirs(output_dir_slice)

            # loop over time frame
            png_buffer = []
            for fr in range(dvf_slice_seq.shape[0]):
                print('Frame: {}/{}'.format(fr, dvf_slice_seq.shape[0]))
                dvf_fr = dvf_slice_seq[fr, :, :, :]
                target_fr = target_slice_seq[fr, :, :]
                source_fr = source_slice_seq[fr, :, :]
                warped_source_fr = warped_source_slice_seq[fr, :, :]

                fig_save_path = os.path.join(output_dir_slice,
                                             'frame_{}.png'.format(fr))
                plot_results(target_fr,
                             source_fr,
                             warped_source_fr,
                             dvf_fr,
                             save_path=fig_save_path)

                # read back the PNG to save a GIF animation
                png_buffer += [imageio.imread(fig_save_path)]
            imageio.mimwrite(os.path.join(output_dir_slice, 'results.gif'),
                             png_buffer,
                             fps=params.fps)
    """"""
    """ 
    Evaulate motion estimation accuracy metrics for each subject
    (NOTE: only works with SAX images) 
    """
    if args.metrics:
        # unpack the ED ES data Tensor inputs, transpose from (1, N, H, W) to (N, 1, H, W)
        image_ed_batch = eval_data['image_ed_batch'].permute(
            1, 0, 2, 3).to(device=args.device)
        image_es_batch = eval_data['image_es_batch'].permute(
            1, 0, 2, 3).to(device=args.device)
        label_es_batch = eval_data['label_es_batch'].permute(
            1, 0, 2, 3).to(device=args.device)

        # compute optical flow and warped ed images using the trained model(source, target)
        dvf = model(image_ed_batch, image_es_batch)

        # warp ED segmentation mask to ES using nearest neighbourhood interpolation
        with torch.no_grad():
            warped_label_es_batch = resample_transform(label_es_batch.float(),
                                                       dvf,
                                                       interp='nearest')

        # move data to cpu to calculate metrics (also transpose into H, W, N)
        warped_label_es_batch = warped_label_es_batch.squeeze(
            1).cpu().numpy().transpose(1, 2, 0)
        label_es_batch = label_es_batch.squeeze(1).cpu().numpy().transpose(
            1, 2, 0)
        label_ed_batch = eval_data['label_ed_batch'].squeeze(
            0).numpy().transpose(1, 2, 0)

        # calculate contour distance metrics, metrics functions take inputs shaped in (H, W, N)
        mcd_lv, hd_lv = contour_distances_stack(warped_label_es_batch,
                                                label_ed_batch,
                                                label_class=1,
                                                dx=params.pixel_size)
        mcd_myo, hd_myo = contour_distances_stack(warped_label_es_batch,
                                                  label_ed_batch,
                                                  label_class=2,
                                                  dx=params.pixel_size)
        mcd_rv, hd_rv = contour_distances_stack(warped_label_es_batch,
                                                label_ed_batch,
                                                label_class=3,
                                                dx=params.pixel_size)

        metrics = dict()
        metrics['mcd_lv'] = mcd_lv
        metrics['hd_lv'] = hd_lv
        metrics['mcd_myo'] = mcd_myo
        metrics['hd_myo'] = hd_myo
        metrics['mcd_rv'] = mcd_rv
        metrics['hd_rv'] = hd_rv

        # save the metrics to a JSON file
        metrics_save_path = os.path.join(subject_output_dir, 'metrics.json')
        xutils.save_dict_to_json(metrics, metrics_save_path)

        # save warped ES segmentations and original (but cropped) ED segmentation into NIfTIs
        nim = nib.load(os.path.join(subject_data_dir, 'label_sa_ED.nii.gz'))
        nim_warped_label_es = nib.Nifti1Image(warped_label_es_batch,
                                              nim.affine, nim.header)
        nib.save(nim_warped_label_es,
                 os.path.join(subject_output_dir, 'warped_label_ES.nii.gz'))
        nim_label_ed = nib.Nifti1Image(label_ed_batch, nim.affine, nim.header)
        nib.save(nim_label_ed,
                 os.path.join(subject_output_dir, 'label_ED.nii.gz'))
        nim_label_es = nib.Nifti1Image(label_es_batch, nim.affine, nim.header)
        nib.save(nim_label_es,
                 os.path.join(subject_output_dir, 'label_ES.nii.gz'))
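
A minimal driver sketch tying the examples together (hypothetical and illustrative only: `run_all_subjects`, `load_eval_data` and the directory layout are assumptions, not part of the original code):

# hypothetical usage sketch -- not part of the original examples
import os

def run_all_subjects(model, data_dir, output_dir, args, params, load_eval_data):
    model.eval()
    for subj_id in sorted(os.listdir(data_dir)):
        subject_data_dir = os.path.join(data_dir, subj_id)
        subject_output_dir = os.path.join(output_dir, subj_id)
        os.makedirs(subject_output_dir, exist_ok=True)
        eval_data = load_eval_data(subject_data_dir)  # dict of ED/ES images and labels
        inference(model, subject_data_dir, eval_data, subject_output_dir, args, params)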