def process(image, brain_mask, args, logger):
    img = io.open_nii(image)
    mask = io.open_nii(brain_mask)
    dirname, base, ext = io.split_filename(image)
    if args.output_dir is not None:
        dirname = args.output_dir
        if not os.path.exists(dirname):
            logger.info('Making output directory: {}'.format(dirname))
            os.mkdir(dirname)
    if args.find_background_mask:
        bg_mask = background_mask(img)
        bgfile = os.path.join(dirname, base + '_bgmask' + ext)
        io.save_nii(bg_mask, bgfile, is_nii=True)
    if args.wm_peak is not None:
        logger.info('Loading WM peak: {}'.format(args.wm_peak))
        peak = float(np.load(args.wm_peak))
    else:
        peak = gmm_class_mask(img, brain_mask=mask, contrast=args.contrast)
        if args.save_wm_peak:
            np.save(os.path.join(dirname, base + '_wmpeak.npy'), peak)
    normalized = gmm.gmm_normalize(img, mask, args.norm_value, args.contrast,
                                   args.background_mask, peak)
    outfile = os.path.join(dirname, base + '_gmm' + ext)
    logger.info('Normalized image saved: {}'.format(outfile))
    io.save_nii(normalized, outfile, is_nii=True)
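The wrapper above is driven by an argparse namespace; a minimal sketch of calling the GMM normalization directly, using only calls that appear elsewhere in this listing (the file paths are hypothetical, and the two-argument gmm_normalize call relies on the library defaults, as in run_intensity_gmm further down):

# Minimal direct-use sketch (hypothetical paths; library defaults assumed).
from intensity_normalization.normalize import gmm
from intensity_normalization.utilities import io

img = io.open_nii('t1.nii.gz')              # hypothetical input image
mask = io.open_nii('t1_brainmask.nii.gz')   # hypothetical brain mask
normalized = gmm.gmm_normalize(img, mask)   # two-argument call, as in run_intensity_gmm below
io.save_nii(normalized, 't1_gmm.nii.gz', is_nii=True)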
def run_intensity_fcm(infile, outfolder):

    from intensity_normalization.normalize import fcm
    from intensity_normalization.utilities import io

    try:
        if not exists(join(outfolder, "Robex")):
            makedirs(join(outfolder, "Robex"))

        if not exists(join(outfolder, "wm_masks")):
            makedirs(join(outfolder, "wm_masks"))

        filename = infile.split(sep)[-1].split(".")[0]
        i = io.open_nii(infile)
        b_mask = io.open_nii(
            join(outfolder, "robex_masks", filename + "_mask.nii.gz"))
        wm_mask = fcm.find_tissue_mask(i, b_mask)
        normalized = fcm.fcm_normalize(i, wm_mask)
        io.save_nii(wm_mask,
                    join(outfolder, "wm_masks", filename + "_wmmask.nii.gz"))
        io.save_nii(normalized, join(outfolder, filename + "_fcm.nii.gz"))
        shutil.move(infile, join(outfolder, "Robex"))
    except Exception:
        e = sys.exc_info()
        print("Error: ", str(e[0]))
        print("Error: ", str(e[1]))
        print("Error: executing fcm method")
        sys.exit(2)
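The run_intensity_* helpers in this listing (fcm, gmm, whitestripe, z-score, RAVEL) rely on several module-level imports that the excerpts do not show. A plausible preamble that would make them runnable is sketched below; it is an assumption, since the original module header is not included here:

# Assumed module-level imports for the run_intensity_* helpers
# (the original module header is not shown in these excerpts).
import shutil
import sys
from os import makedirs, rename, sep, walk
from os.path import exists, join

import nibabel as nib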
Example #3
def setUp(self):
    wd = os.path.dirname(os.path.abspath(__file__))
    self.data_dir = os.path.join(wd, 'test_data', 'images')
    self.mask_dir = os.path.join(wd, 'test_data', 'masks')
    self.img = io.open_nii(os.path.join(self.data_dir, 'test.nii.gz'))
    self.brain_mask = io.open_nii(
        os.path.join(self.mask_dir, 'mask.nii.gz'))
Example #4
def train(img_fns, mask_fns=None, i_min=1, i_max=99, i_s_min=1, i_s_max=100, l_percentile=10, u_percentile=90, step=10):
    """
    determine the standard scale for the set of images

    Args:
        img_fns (list): set of NifTI MR image paths which are to be normalized
        mask_fns (list): set of corresponding masks (if not provided, estimated)
        i_min (float): minimum percentile to consider in the images
        i_max (float): maximum percentile to consider in the images
        i_s_min (float): minimum percentile on the standard scale
        i_s_max (float): maximum percentile on the standard scale
        l_percentile (int): middle percentile lower bound (e.g., for deciles 10)
        u_percentile (int): middle percentile upper bound (e.g., for deciles 90)
        step (int): step for middle percentiles (e.g., for deciles 10)

    Returns:
        standard_scale (np.ndarray): average landmark intensity for images
        percs (np.ndarray): array of all percentiles used
    """
    mask_fns = [None] * len(img_fns) if mask_fns is None else mask_fns
    percs = np.concatenate(([i_min], np.arange(l_percentile, u_percentile+1, step), [i_max]))
    standard_scale = np.zeros(len(percs))
    for i, (img_fn, mask_fn) in enumerate(zip(img_fns, mask_fns)):
        img_data = io.open_nii(img_fn).get_data()
        mask = io.open_nii(mask_fn) if mask_fn is not None else None
        mask_data = img_data > img_data.mean() if mask is None else mask.get_data()
        masked = img_data[mask_data > 0]
        landmarks = get_landmarks(masked, percs)
        min_p = np.percentile(masked, i_min)
        max_p = np.percentile(masked, i_max)
        f = interp1d([min_p, max_p], [i_s_min, i_s_max])
        landmarks = np.array(f(landmarks))
        standard_scale += landmarks
    standard_scale = standard_scale / len(img_fns)
    return standard_scale, percs
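With the default arguments, percs is the vector [1, 10, 20, ..., 90, 99]. A small worked example on synthetic data is shown below; it assumes get_landmarks reduces to taking np.percentile of the masked voxels at percs, which is what the interpolation step above expects:

# Worked example with synthetic data (assumes get_landmarks == np.percentile
# of the masked voxels at percs).
import numpy as np
from scipy.interpolate import interp1d

rng = np.random.default_rng(0)
masked = rng.normal(loc=100.0, scale=15.0, size=10000)      # stand-in for masked brain voxels
percs = np.concatenate(([1], np.arange(10, 91, 10), [99]))  # [1, 10, 20, ..., 90, 99]
landmarks = np.percentile(masked, percs)
f = interp1d([np.percentile(masked, 1), np.percentile(masked, 99)], [1, 100])
print(f(landmarks))  # landmark intensities mapped onto the standard scale [1, 100]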
Example #5
def nyul_normalize(img_dir, mask_dir=None, output_dir=None, standard_hist=None, write_to_disk=True):
    """
    Use Nyul and Udupa method ([1,2]) to normalize the intensities of a set of MR images

    Args:
        img_dir (str): directory containing MR images
        mask_dir (str): directory containing masks for MR images
        output_dir (str): directory to save images if you do not want them saved in
            same directory as data_dir
        standard_hist (str): path to output or use standard histogram landmarks
        write_to_disk (bool): write the normalized data to disk or not

    Returns:
        normalized (np.ndarray): last normalized image from img_dir

    References:
        [1] L. G. Nyúl and J. K. Udupa, “On Standardizing the MR Image
            Intensity Scale,” Magn. Reson. Med., vol. 42, pp. 1072–1081,
            1999.
        [2] M. Shah, Y. Xiao, N. Subbanna, S. Francis, D. L. Arnold,
            D. L. Collins, and T. Arbel, “Evaluating intensity
            normalization on MRIs of human brain with multiple sclerosis,”
            Med. Image Anal., vol. 15, no. 2, pp. 267–282, 2011.
    """
    input_files = io.glob_nii(img_dir)
    if output_dir is None:
        out_fns = [None] * len(input_files)
    else:
        out_fns = []
        for fn in input_files:
            _, base, ext = io.split_filename(fn)
            out_fns.append(os.path.join(output_dir, base + '_hm' + ext))
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)

    mask_files = [None] * len(input_files) if mask_dir is None else io.glob_nii(mask_dir)

    if standard_hist is None:
        logger.info('Learning standard scale for the set of images')
        standard_scale, percs = train(input_files, mask_files)
    elif not os.path.isfile(standard_hist):
        logger.info('Learning standard scale for the set of images')
        standard_scale, percs = train(input_files, mask_files)
        np.save(standard_hist, np.vstack((standard_scale, percs)))
    else:
        logger.info('Loading standard scale ({}) for the set of images'.format(standard_hist))
        standard_scale, percs = np.load(standard_hist)

    for i, (img_fn, mask_fn, out_fn) in enumerate(zip(input_files, mask_files, out_fns)):
        _, base, _ = io.split_filename(img_fn)
        logger.info('Transforming image {} to standard scale ({:d}/{:d})'.format(base, i+1, len(input_files)))
        img = io.open_nii(img_fn)
        mask = io.open_nii(mask_fn) if mask_fn is not None else None
        normalized = do_hist_norm(img, percs, standard_scale, mask)
        if write_to_disk:
            io.save_nii(normalized, out_fn, is_nii=True)

    return normalized
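A usage sketch follows; the directory names are hypothetical. Per the standard_hist branches above, the first call learns and saves the standard scale to the .npy file, and later calls reload it instead of retraining:

# Sketch of typical use (hypothetical directory names).
normalized = nyul_normalize('images', mask_dir='masks',
                            output_dir='nyul_out',
                            standard_hist='standard_hist.npy',
                            write_to_disk=True)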
Example #6
def setUp(self):
    wd = os.path.dirname(os.path.abspath(__file__))
    self.data_dir = os.path.join(wd, 'test_data', 'images')
    self.mask_dir = os.path.join(wd, 'test_data', 'masks')
    self.img = io.open_nii(os.path.join(self.data_dir, 'test.nii.gz'))
    self.brain_mask = io.open_nii(
        os.path.join(self.mask_dir, 'mask.nii.gz'))
    self.template_mask = os.path.join(self.mask_dir, 'mask.nii.gz')
    self.wm_mask = fcm.find_wm_mask(self.img, self.brain_mask)
    self.norm_val = 1000
Example #7
def lsq_normalize(img_dir, mask_dir=None, output_dir=None, write_to_disk=True):
    """
    normalize intensities of a set of MR images by minimizing the squared distance
    between CSF, GM, and WM means within the set

    Args:
        img_dir (str): directory containing MR images
        mask_dir (str): directory containing masks for MR images
        output_dir (str): directory to save images if you do not want them saved in
            same directory as data_dir
        write_to_disk (bool): write the normalized data to disk or not

    Returns:
        normalized (nib.Nifti1Image): last normalized image from img_dir
    """
    input_files = io.glob_nii(img_dir)
    if output_dir is None:
        out_fns = [None] * len(input_files)
    else:
        out_fns = []
        for fn in input_files:
            _, base, ext = io.split_filename(fn)
            out_fns.append(os.path.join(output_dir, base + '_lsq' + ext))
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)

    mask_files = [None] * len(
        input_files) if mask_dir is None else io.glob_nii(mask_dir)

    standard_tissue_means = None
    normalized = None
    for i, (img_fn, mask_fn,
            out_fn) in enumerate(zip(input_files, mask_files, out_fns)):
        _, base, _ = io.split_filename(img_fn)
        logger.info(
            'Transforming image {} to standard scale ({:d}/{:d})'.format(
                base, i + 1, len(input_files)))
        img = io.open_nii(img_fn)
        mask = io.open_nii(mask_fn) if mask_fn is not None else None
        tissue_mem = mask_util.fcm_class_mask(img, mask)
        if standard_tissue_means is None:
            csf_tissue_mask = find_tissue_mask(img, mask, tissue_type='csf')
            csf_normed_data = fcm_normalize(img, csf_tissue_mask).get_fdata()
            standard_tissue_means = calc_tissue_means(csf_normed_data,
                                                      tissue_mem)
            del csf_tissue_mask, csf_normed_data
        img_data = img.get_fdata()
        tissue_means = calc_tissue_means(img_data, tissue_mem)
        sf = find_scaling_factor(tissue_means, standard_tissue_means)
        logger.debug('Scaling factor for {}: {:0.3e}'.format(base, sf))
        normalized = nib.Nifti1Image(sf * img_data, img.affine, img.header)
        if write_to_disk:
            io.save_nii(normalized, out_fn, is_nii=True)

    return normalized
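A usage sketch with hypothetical directory names; per the loop above, the first image in img_dir defines the standard tissue means that all other images are scaled towards:

# Sketch of typical use (hypothetical directory names).
normalized = lsq_normalize('images', mask_dir='masks',
                           output_dir='lsq_out', write_to_disk=True)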
def main(args=None):
    args = arg_parser().parse_args(args)
    if args.verbosity == 1:
        level = logging.getLevelName('INFO')
    elif args.verbosity >= 2:
        level = logging.getLevelName('DEBUG')
    else:
        level = logging.getLevelName('WARNING')
    logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        level=level)
    logger = logging.getLogger(__name__)
    try:
        if not os.path.isdir(args.img_dir):
            raise ValueError(
                '(-i / --img-dir) argument needs to be a directory of NIfTI images.'
            )
        if args.mask_dir is not None:
            if not os.path.isdir(args.mask_dir):
                raise ValueError(
                    '(-m / --mask-dir) argument needs to be a directory of NIfTI images.'
                )

        img_fns = io.glob_nii(args.img_dir)
        if args.mask_dir is not None:
            mask_fns = io.glob_nii(args.mask_dir)
        else:
            mask_fns = [None] * len(img_fns)
        if not os.path.exists(args.output_dir):
            logger.info('Making Output Directory: {}'.format(args.output_dir))
            os.mkdir(args.output_dir)
        hard_seg = not args.memberships
        for i, (img_fn, mask_fn) in enumerate(zip(img_fns, mask_fns), 1):
            _, base, _ = io.split_filename(img_fn)
            logger.info('Creating Mask for Image: {}, ({:d}/{:d})'.format(
                base, i, len(img_fns)))
            img = io.open_nii(img_fn)
            mask = io.open_nii(mask_fn) if mask_fn is not None else None
            if args.gmm:
                tm = gmm_class_mask(img, mask, 't1', False, hard_seg)
            else:
                tm = fcm_class_mask(img, mask, hard_seg)
            tissue_mask = os.path.join(args.output_dir, base + '_tm')
            if args.memberships:
                classes = ('csf', 'gm', 'wm')
                for j, c in enumerate(classes):
                    io.save_nii(img, tissue_mask + '_' + c + '.nii.gz',
                                tm[..., j])
            else:
                io.save_nii(img, tissue_mask + '.nii.gz', tm)
        return 0
    except Exception as e:
        logger.exception(e)
        return 1
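The --memberships branch above writes one probability map per tissue class, while the default writes a single hard segmentation. A small illustrative sketch of the two call forms follows; the paths are hypothetical, and the csf/gm/wm class order and 4D membership layout are taken from the loop above:

# Illustrative sketch (hypothetical paths; class order as in the loop above).
img = io.open_nii('t1.nii.gz')
mask = io.open_nii('t1_brainmask.nii.gz')

hard = fcm_class_mask(img, mask, True)    # single hard 3-class segmentation
mem = fcm_class_mask(img, mask, False)    # 4D csf/gm/wm membership volumes
for j, c in enumerate(('csf', 'gm', 'wm')):
    io.save_nii(img, 't1_tm_{}.nii.gz'.format(c), mem[..., j])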
Example #9
def process(image_fn, brain_mask_fn, args, logger):
    img = io.open_nii(image_fn)
    if args.brain_mask is not None:
        mask = io.open_nii(brain_mask_fn)
    else:
        mask = None
    dirname, base, _ = io.split_filename(image_fn)
    if args.output_dir is not None:
        dirname = args.output_dir
        if not os.path.exists(dirname):
            logger.info('Making output directory: {}'.format(dirname))
            os.mkdir(dirname)
    normalized = kde.kde_normalize(img, mask, args.contrast, args.norm_value)
    outfile = os.path.join(dirname, base + '_kde.nii.gz')
    logger.info('Normalized image saved: {}'.format(outfile))
    io.save_nii(normalized, outfile, is_nii=True)
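The wrapper above forwards contrast and norm value from the CLI arguments; a direct call looks like the sketch below, where the paths, the 't1' contrast string, and the norm value of 1000 are illustrative choices rather than package defaults:

# Direct call mirroring the wrapper above (illustrative paths and values).
img = io.open_nii('t1.nii.gz')
mask = io.open_nii('t1_brainmask.nii.gz')
normalized = kde.kde_normalize(img, mask, 't1', 1000)
io.save_nii(normalized, 't1_kde.nii.gz', is_nii=True)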
def match_histograms(reference_path, apply_to_path):

    print('Start')

    for name in ['InPhase', 'OutPhase']:

        print(f'Working on: {name}')
        ref_files = list()
        for folder in os.listdir(reference_path):
            ref_files.append(f'{reference_path}/{folder}/{name}.nii')

        print('Training')
        mask_files = [None] * len(ref_files)
        standard_scale, percs = nyul.train(ref_files, mask_files)

        input_files, output_files = list(), list()
        for folder in os.listdir(apply_to_path):
            input_files.append(f'{apply_to_path}/{folder}/{name}.nii')
            output_files.append(f'{apply_to_path}/{folder}/{name}_HM.nii')

        print('Normalizing')
        for img_fn, out_fn in zip(input_files, output_files):
            print(img_fn, '->', out_fn)
            _, base, _ = io.split_filename(img_fn)
            img = io.open_nii(img_fn)
            normalized = nyul.do_hist_norm(img,
                                           percs,
                                           standard_scale,
                                           mask=None)
            io.save_nii(normalized, out_fn, is_nii=True)

    print('Done')
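This helper assumes a layout in which every subject folder under both paths contains an InPhase.nii and an OutPhase.nii volume; a call then looks like the sketch below (paths hypothetical):

# Expected layout: one folder per subject under each path, each containing
# InPhase.nii and OutPhase.nii. Paths here are hypothetical.
match_histograms('/data/reference_subjects', '/data/target_subjects')
# writes InPhase_HM.nii / OutPhase_HM.nii next to each target image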
Example #11
def process(image_fn, brain_mask_fn, output_dir, logger):
    img = io.open_nii(image_fn)
    dirname, base, _ = io.split_filename(image_fn)
    if output_dir is not None:
        dirname = output_dir
        if not os.path.exists(dirname):
            logger.info('Making output directory: {}'.format(dirname))
            os.mkdir(dirname)
    if brain_mask_fn is None:
        mask = None
    else:
        if brain_mask_fn == 'nomask':
            mask = 'nomask'
        else:
            mask = io.open_nii(brain_mask_fn)
    normalized = zscore.zscore_normalize(img, mask)
    outfile = os.path.join(dirname, base + '_zscore.nii.gz')
    logger.info('Normalized image saved: {}'.format(outfile))
    io.save_nii(normalized, outfile, is_nii=True)
Example #12
def process(image_fn, brain_mask_fn, wm_mask_fn, output_dir, args, logger):
    img = io.open_nii(image_fn)
    dirname, base, _ = io.split_filename(image_fn)
    if output_dir is not None:
        dirname = output_dir
        if not os.path.exists(dirname):
            logger.info('Making output directory: {}'.format(dirname))
            os.mkdir(dirname)
    if brain_mask_fn is not None:
        mask = io.open_nii(brain_mask_fn)
        wm_mask = fcm.find_wm_mask(img, mask)
        outfile = os.path.join(dirname, base + '_wmmask.nii.gz')
        io.save_nii(wm_mask, outfile, is_nii=True)
    if wm_mask_fn is not None:
        wm_mask = io.open_nii(wm_mask_fn)
        normalized = fcm.fcm_normalize(img, wm_mask, args.norm_value)
        outfile = os.path.join(dirname, base + '_fcm.nii.gz')
        logger.info('Normalized image saved: {}'.format(outfile))
        io.save_nii(normalized, outfile, is_nii=True)
def run_intensity_gmm(infile, outfolder):
    from intensity_normalization.normalize import gmm
    from intensity_normalization.utilities import io
    try:
        if not exists(join(outfolder, "Robex")):
            makedirs(join(outfolder, "Robex"))

        filename = infile.split(sep)[-1].split(".")[0]
        i = io.open_nii(join(outfolder, infile))
        b_mask = io.open_nii(
            join(outfolder, "robex_masks", filename + "_mask.nii.gz"))
        normalized = gmm.gmm_normalize(i, b_mask)
        io.save_nii(normalized, join(outfolder, filename + "_gmm.nii.gz"))
        shutil.move(join(outfolder, infile), join(outfolder, "Robex"))
    except Exception:
        e = sys.exc_info()
        print("Error: ", str(e[0]))
        print("Error: ", str(e[1]))
        print("Error: executing gmm method")
        sys.exit(2)
def run_intensity_ws(infile, outfolder):

    from intensity_normalization.normalize import whitestripe
    from intensity_normalization.utilities import io

    try:
        filename = infile.split(sep)[-1].split(".")[0]

        if not exists(join(outfolder, "Robex")):
            makedirs(join(outfolder, "Robex"))

        print('running intensity white stripe...')
        img = io.open_nii(infile)
        indices = whitestripe.whitestripe(img, "T1")
        normalized = whitestripe.whitestripe_norm(img, indices)
        io.save_nii(normalized, join(outfolder, filename + "_ws.nii.gz"))
        shutil.move(infile, join(outfolder, "Robex"))
    except Exception:
        e = sys.exc_info()
        print("Error: ", str(e[0]))
        print("Error: ", str(e[1]))
        print("Error: executing white stripe method")
        sys.exit(2)
def run_intensity_zscore(infile, outfolder):

    from intensity_normalization.normalize import zscore
    from intensity_normalization.utilities import io
    try:
        filename = infile.split(sep)[-1].split(".")[0]
        i = io.open_nii(infile)
        b_mask = io.open_nii(
            join(outfolder, "robex_masks", filename + "_mask.nii.gz"))

        if not exists(join(outfolder, "Robex")):
            makedirs(join(outfolder, "Robex"))

        print('running intensity zscore...')
        normalized = zscore.zscore_normalize(i, b_mask)
        io.save_nii(normalized, join(outfolder, filename + "_zscore.nii.gz"))
        shutil.move(infile, join(outfolder, "Robex"))
    except Exception:
        e = sys.exc_info()
        print("Error: ", str(e[0]))
        print("Error: ", str(e[1]))
        print("Error: executing z-score method")
        sys.exit(2)
Example #16
def image_matrix_to_images(V, imgs):
    """
    convert an image matrix to a list of correctly formatted nifti images

    Args:
        V (np.ndarray): image matrix (rows are voxels, columns are images)
        imgs (list): list of paths to corresponding MR images in V

    Returns:
        img_list (list): list of nifti images extracted from V
    """
    img_list = []
    for i, img_fn in enumerate(imgs):
        img = io.open_nii(img_fn)
        nimg = nib.Nifti1Image(V[:, i].reshape(img.get_data().shape),
                               img.affine, img.header)
        img_list.append(nimg)
    return img_list
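This is the inverse of image_matrix (Example #20 later in this listing). A round-trip sketch follows, with hypothetical file names and WhiteStripe disabled so the voxel values pass through unchanged:

# Round trip with image_matrix (defined later in this listing);
# file names are hypothetical.
imgs = ['sub1_t1.nii.gz', 'sub2_t1.nii.gz']
masks = ['sub1_mask.nii.gz', 'sub2_mask.nii.gz']
V = image_matrix(imgs, 't1', masks=masks, do_whitestripe=False)
recovered = image_matrix_to_images(V, imgs)   # one nibabel image per column of V
io.save_nii(recovered[0], 'sub1_t1_recon.nii.gz')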
def ws_normalize(img_dir,
                 contrast,
                 mask_dir=None,
                 output_dir=None,
                 write_to_disk=True):
    """
    Use WhiteStripe normalization method ([1]) to normalize the intensities of
    a set of MR images by normalizing an area around the white matter peak of the histogram

    Args:
        img_dir (str): directory containing MR images to be normalized
        contrast (str): contrast of MR images to be normalized (T1, T2, or FLAIR)
        mask_dir (str): if images are not skull-stripped, then provide brain mask
        output_dir (str): directory to save images if you do not want them saved in
            same directory as img_dir
        write_to_disk (bool): write the normalized data to disk or not

    Returns:
        normalized (np.ndarray): last normalized image data from img_dir
            (returned mainly for testing purposes)

    References:
        [1] R. T. Shinohara, E. M. Sweeney, J. Goldsmith, N. Shiee,
            F. J. Mateen, P. A. Calabresi, S. Jarso, D. L. Pham,
            D. S. Reich, and C. M. Crainiceanu, “Statistical normalization
            techniques for magnetic resonance imaging,” NeuroImage Clin.,
            vol. 6, pp. 9–19, 2014.
    """

    # grab the file names for the images of interest
    data = io.glob_nii(img_dir)

    # define and get the brain masks for the images, if defined
    if mask_dir is None:
        masks = [None] * len(data)
    else:
        masks = io.glob_nii(mask_dir)
        if len(data) != len(masks):
            raise NormalizationError(
                'Number of images and masks must be equal, Images: {}, Masks: {}'
                .format(len(data), len(masks)))

    # define the output directory and corresponding output file names
    if output_dir is None:
        output_files = [None] * len(data)
    else:
        output_files = []
        for fn in data:
            _, base, ext = io.split_filename(fn)
            output_files.append(os.path.join(output_dir, base + '_ws' + ext))
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)

    # do whitestripe normalization and save the results
    for i, (img_fn, mask_fn,
            output_fn) in enumerate(zip(data, masks, output_files), 1):
        logger.info('Normalizing image: {} ({:d}/{:d})'.format(
            img_fn, i, len(data)))
        img = io.open_nii(img_fn)
        mask = io.open_nii(mask_fn) if mask_fn is not None else None
        indices = whitestripe(img, contrast, mask=mask)
        normalized = whitestripe_norm(img, indices)
        if write_to_disk:
            logger.info('Saving normalized image: {} ({:d}/{:d})'.format(
                output_fn, i, len(data)))
            io.save_nii(normalized, output_fn)

    # output the last normalized image (mostly for testing purposes)
    return normalized
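A usage sketch with hypothetical directory names; per the docstring above, contrast must be one of T1, T2, or FLAIR:

# Sketch of typical use (hypothetical directory names).
normalized = ws_normalize('images', 'T1', mask_dir='masks',
                          output_dir='ws_out', write_to_disk=True)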
def run_intensity_ravel(outfolder):
    from intensity_normalization.normalize import ravel
    from intensity_normalization.utilities import io, csf

    try:
        images = []
        brainMasks = []
        csfMasks = []
        _, _, filenames = next(walk(outfolder))
        for f in filenames:
            filename = f.split(sep)[-1].split(".")[0]
            images.append(io.open_nii(join(outfolder, f.split(sep)[-1])))
            brainMasks.append(
                io.open_nii(
                    join(outfolder, 'robex_masks', filename + "_mask.nii.gz")))

        if not exists(join(outfolder, "Robex")):
            makedirs(join(outfolder, "Robex"))

        if not exists(join(outfolder, "csf_masks")):
            makedirs(join(outfolder, "csf_masks"))

        print("creating csf masks...")
        for image, brainMask, f in zip(images, brainMasks, filenames):
            filename = f.split(sep)[-1].split(".")[0]
            csfMask = csf.csf_mask(image,
                                   brainMask,
                                   contrast='T1',
                                   csf_thresh=0.9,
                                   return_prob=False,
                                   mrf=0.25,
                                   use_fcm=False)
            output = nib.Nifti1Image(csfMask, image.affine, image.header)  # keep source geometry
            io.save_nii(
                output,
                join(outfolder, 'csf_masks', filename + "_csfmask.nii.gz"))
            shutil.move(join(outfolder,
                             f.split(sep)[-1]), join(outfolder, "Robex"))

        print('running intensity ravel...')
        ravel.ravel_normalize(join(outfolder, 'Robex'),
                              join(outfolder, 'csf_masks'),
                              'T1',
                              output_dir=outfolder,
                              write_to_disk=True,
                              do_whitestripe=True,
                              b=1,
                              membership_thresh=0.99,
                              segmentation_smoothness=0.25,
                              do_registration=False,
                              use_fcm=True,
                              sparse_svd=False,
                              csf_masks=True)

        for i in filenames:
            rename(
                join(outfolder,
                     i.split(sep)[-1]),
                join(outfolder,
                     i.split(sep)[-1].split(".")[0] + "_RAVEL.nii.gz"))
    except Exception:
        e = sys.exc_info()
        print("Error: ", str(e[0]))
        print("Error: ", str(e[1]))
        print("Error: executing ravel method")
        sys.exit(2)
Example #19
def ravel_normalize(img_dir,
                    mask_dir,
                    contrast,
                    output_dir=None,
                    write_to_disk=False,
                    do_whitestripe=True,
                    b=1,
                    membership_thresh=0.99,
                    segmentation_smoothness=0.25,
                    do_registration=False,
                    use_fcm=True):
    """
    Use RAVEL [1] to normalize the intensities of a set of MR images to eliminate
    unwanted technical variation in images (but, hopefully, preserve biological variation)

    this function has an option, modified from [1], in which no registration is done;
    the control mask is instead defined dynamically by finding a tissue segmentation of
    the brain and thresholding the membership at a very high level (this seems to work
    well and is *much* faster), although the results appear somewhat less consistent

    Args:
        img_dir (str): directory containing MR images to be normalized
        mask_dir (str): brain masks for imgs
        contrast (str): contrast of MR images to be normalized (T1, T2, or FLAIR)
        output_dir (str): directory to save images if you do not want them saved in
            same directory as data_dir
        write_to_disk (bool): write the normalized data to disk or not
        do_whitestripe (bool): whitestripe normalize the images before applying RAVEL correction
        b (int): number of unwanted factors to estimate
        membership_thresh (float): threshold of membership for control voxels
        segmentation_smoothness (float): segmentation smoothness parameter for atropos ANTsPy
            segmentation scheme (i.e., mrf parameter)
        do_registration (bool): deformably register images to find control mask
        use_fcm (bool): use FCM for segmentation instead of atropos (may be less accurate)

    Returns:
        Z (np.ndarray): unwanted factors (used in ravel correction)
        normalized (np.ndarray): set of normalized images from data_dir

    References:
        [1] J. P. Fortin, E. M. Sweeney, J. Muschelli, C. M. Crainiceanu,
            and R. T. Shinohara, “Removing inter-subject technical variability
            in magnetic resonance imaging studies,” Neuroimage, vol. 132,
            pp. 198–212, 2016.
    """
    img_fns = io.glob_nii(img_dir)
    mask_fns = io.glob_nii(mask_dir)

    if output_dir is None or not write_to_disk:
        out_fns = None
    else:
        out_fns = []
        for fn in img_fns:
            _, base, ext = io.split_filename(fn)
            out_fns.append(os.path.join(output_dir, base + ext))
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)

    # get parameters necessary and setup the V array
    V, Vc = image_matrix(img_fns,
                         contrast,
                         masks=mask_fns,
                         do_whitestripe=do_whitestripe,
                         return_ctrl_matrix=True,
                         membership_thresh=membership_thresh,
                         do_registration=do_registration,
                         smoothness=segmentation_smoothness,
                         use_fcm=use_fcm)

    # estimate the unwanted factors Z
    _, _, vh = np.linalg.svd(Vc)
    Z = vh.T[:, 0:b]

    # perform the ravel correction
    V_norm = ravel_correction(V, Z)

    # save the results to disk if desired
    if write_to_disk:
        for i, (img_fn, out_fn) in enumerate(zip(img_fns, out_fns)):
            img = io.open_nii(img_fn)
            norm = V_norm[:, i].reshape(img.get_data().shape)
            io.save_nii(img, out_fn, data=norm)

    return Z, V_norm
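A usage sketch with hypothetical directory names; with do_registration=False the control voxels come from a high-threshold CSF membership rather than a registered CSF-mask intersection, as described in the docstring above:

# Sketch of typical use (hypothetical directory names).
Z, V_norm = ravel_normalize('images', 'masks', 'T1',
                            output_dir='ravel_out', write_to_disk=True,
                            b=1, membership_thresh=0.99)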
Example #20
def image_matrix(imgs,
                 contrast,
                 masks=None,
                 do_whitestripe=True,
                 return_ctrl_matrix=False,
                 membership_thresh=0.99,
                 smoothness=0.25,
                 max_ctrl_vox=10000,
                 do_registration=False,
                 ctrl_prob=1,
                 use_fcm=False):
    """
    creates an matrix of images where the rows correspond the the voxels of
    each image and the columns are the images

    Args:
        imgs (list): list of paths to MR images of interest
        contrast (str): contrast of the set of imgs (e.g., T1)
        masks (list or str): list of corresponding brain masks or just one (template) mask
        do_whitestripe (bool): do whitestripe on the images before storing in matrix or not
        return_ctrl_matrix (bool): return control matrix for imgs (i.e., a subset of V's rows)
        membership_thresh (float): threshold of membership for control voxels (want this very high)
            this option is only used if the registration is turned off
        smoothness (float): smoothness parameter for segmentation for control voxels
            this option is only used if the registration is turned off
        max_ctrl_vox (int): maximum number of control voxels (if too high, everything
            crashes depending on available memory) only used if do_registration is false
        do_registration (bool): register the images together and take the intersection of the csf
            masks (as done in the original paper, note that this takes much longer)
        ctrl_prob (float): given all data, proportion of data labeled as csf to be
            used for intersection (i.e., when do_registration is true)
        use_fcm (bool): use FCM for segmentation instead of atropos (may be less accurate)

    Returns:
        V (np.ndarray): image matrix (rows are voxels, columns are images)
        Vc (np.ndarray): image matrix of control voxels (rows are voxels, columns are images)
            Vc only returned if return_ctrl_matrix is True
    """
    img_shape = io.open_nii(imgs[0]).get_data().shape
    V = np.zeros((int(np.prod(img_shape)), len(imgs)))

    if return_ctrl_matrix:
        ctrl_vox = []

    if masks is None and return_ctrl_matrix:
        raise NormalizationError(
            'Brain masks must be provided if returning control memberships')
    if masks is None:
        masks = [None] * len(imgs)

    for i, (img_fn, mask_fn) in enumerate(zip(imgs, masks)):
        _, base, _ = io.split_filename(img_fn)
        img = io.open_nii(img_fn)
        mask = io.open_nii(mask_fn) if mask_fn is not None else None
        # do whitestripe on the image before applying RAVEL (if desired)
        if do_whitestripe:
            logger.info('Applying WhiteStripe to image {} ({:d}/{:d})'.format(
                base, i + 1, len(imgs)))
            inds = whitestripe(img, contrast, mask)
            img = whitestripe_norm(img, inds)
        img_data = img.get_data()
        if img_data.shape != img_shape:
            raise NormalizationError(
                'Cannot normalize because image {} needs to have same dimension '
                'as all other images ({} != {})'.format(
                    base, img_data.shape, img_shape))
        V[:, i] = img_data.flatten()
        if return_ctrl_matrix:
            if do_registration and i == 0:
                logger.info(
                    'Creating control mask for image {} ({:d}/{:d})'.format(
                        base, i + 1, len(imgs)))
                verbose = logger.getEffectiveLevel() == logging.DEBUG
                ctrl_masks = []
                reg_imgs = []
                reg_imgs.append(csf.nibabel_to_ants(img))
                ctrl_masks.append(
                    csf.csf_mask(img,
                                 mask,
                                 contrast=contrast,
                                 csf_thresh=membership_thresh,
                                 mrf=smoothness,
                                 use_fcm=use_fcm))
            elif do_registration and i != 0:
                template = ants.image_read(imgs[0])
                tmask = ants.image_read(masks[0])
                img = csf.nibabel_to_ants(img)
                logger.info(
                    'Starting registration for image {} ({:d}/{:d})'.format(
                        base, i + 1, len(imgs)))
                reg_result = ants.registration(template,
                                               img,
                                               type_of_transform='SyN',
                                               mask=tmask,
                                               verbose=verbose)
                img = reg_result['warpedmovout']
                mask = csf.nibabel_to_ants(mask)
                reg_imgs.append(img)
                logger.info(
                    'Creating control mask for image {} ({:d}/{:d})'.format(
                        base, i + 1, len(imgs)))
                ctrl_masks.append(
                    csf.csf_mask(img,
                                 mask,
                                 contrast=contrast,
                                 csf_thresh=membership_thresh,
                                 mrf=smoothness,
                                 use_fcm=use_fcm))
            else:
                logger.info(
                    'Finding control voxels for image {} ({:d}/{:d})'.format(
                        base, i + 1, len(imgs)))
                ctrl_mask = csf.csf_mask(img,
                                         mask,
                                         contrast=contrast,
                                         csf_thresh=membership_thresh,
                                         mrf=smoothness,
                                         use_fcm=use_fcm)
                if np.sum(ctrl_mask) == 0:
                    raise NormalizationError(
                        'No control voxels found for image ({}) at threshold ({})'
                        .format(base, membership_thresh))
                elif np.sum(ctrl_mask) < 100:
                    logger.warning(
                        'Few control voxels found ({:d}) (potentially a problematic image ({}) or '
                        'threshold ({}) too high)'.format(
                            int(np.sum(ctrl_mask)), base, membership_thresh))
                ctrl_vox.append(img_data[ctrl_mask == 1].flatten())

    if return_ctrl_matrix and not do_registration:
        min_len = min(min(map(len, ctrl_vox)), max_ctrl_vox)
        logger.info('Using {:d} control voxels'.format(min_len))
        Vc = np.zeros((min_len, len(imgs)))
        for i in range(len(imgs)):
            ctrl_voxs = ctrl_vox[i][:min_len]
            logger.info(
                'Image {:d} control voxel stats -  mean: {:.3f}, std: {:.3f}'.
                format(i + 1, np.mean(ctrl_voxs), np.std(ctrl_voxs)))
            Vc[:, i] = ctrl_voxs
    elif return_ctrl_matrix and do_registration:
        # need to use reduce instead of sum because of the mask data structure
        ctrl_sum = reduce(add, ctrl_masks)
        intersection = np.zeros(ctrl_sum.shape)
        intersection[ctrl_sum >= np.floor(len(ctrl_masks) * ctrl_prob)] = 1
        num_ctrl_vox = int(np.sum(intersection))
        Vc = np.zeros((num_ctrl_vox, len(imgs)))
        for i, img in enumerate(reg_imgs):
            ctrl_voxs = img.numpy()[intersection == 1]
            logger.info(
                'Image {:d} control voxel stats -  mean: {:.3f}, std: {:.3f}'.
                format(i + 1, np.mean(ctrl_voxs), np.std(ctrl_voxs)))
            Vc[:, i] = ctrl_voxs
        del ctrl_masks, reg_imgs
        import gc
        # force a garbage collection, since we just used the majority of the system memory
        gc.collect()

    return V if not return_ctrl_matrix else (V, Vc)
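A sketch of how ravel_normalize (Example #19 above) consumes this function: request the control matrix, estimate the unwanted factors via an SVD of the control voxels, apply the correction, and reshape the corrected columns back into images with image_matrix_to_images (Example #16). File names are hypothetical:

# Mirrors the ravel_normalize pipeline above; file names are hypothetical.
import numpy as np

imgs = ['sub1_t1.nii.gz', 'sub2_t1.nii.gz', 'sub3_t1.nii.gz']
masks = ['sub1_mask.nii.gz', 'sub2_mask.nii.gz', 'sub3_mask.nii.gz']
V, Vc = image_matrix(imgs, 't1', masks=masks, return_ctrl_matrix=True)
_, _, vh = np.linalg.svd(Vc)        # unwanted factors estimated from control voxels
Z = vh.T[:, 0:1]                    # keep b = 1 factor, as in ravel_normalize
V_norm = ravel_correction(V, Z)
corrected = image_matrix_to_images(V_norm, imgs)   # reshape corrected columns back to images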